Nov 25 09:27:55 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 09:27:55 crc restorecon[4705]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:27:55 crc restorecon[4705]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc 
restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:27:55 crc 
restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:55 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc 
restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc 
restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 
crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 
09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 
09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:27:56 crc 
restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 09:27:56 crc restorecon[4705]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 09:27:59 crc kubenswrapper[4734]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:27:59 crc kubenswrapper[4734]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 09:27:59 crc kubenswrapper[4734]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:27:59 crc kubenswrapper[4734]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 25 09:27:59 crc kubenswrapper[4734]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 09:27:59 crc kubenswrapper[4734]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.550853 4734 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558561 4734 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558592 4734 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558596 4734 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558600 4734 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558607 4734 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558612 4734 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558617 4734 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558622 4734 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558626 4734 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558629 4734 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558633 4734 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558637 4734 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558640 4734 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558644 4734 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558648 4734 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558651 4734 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558655 4734 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558659 4734 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558662 4734 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558666 4734 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558669 4734 
feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558673 4734 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558677 4734 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558680 4734 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558684 4734 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558687 4734 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558691 4734 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558695 4734 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558698 4734 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558702 4734 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558705 4734 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558708 4734 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558712 4734 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558715 4734 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558727 4734 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558732 4734 feature_gate.go:330] unrecognized feature gate: Example Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558736 4734 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558740 4734 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558747 4734 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558752 4734 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558756 4734 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558760 4734 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558764 4734 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558768 4734 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558772 4734 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558775 4734 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558779 4734 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558783 4734 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558787 4734 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558791 4734 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558794 4734 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558798 4734 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558802 4734 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558808 4734 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558812 4734 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558816 4734 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558819 4734 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558823 4734 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558826 4734 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558830 4734 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558833 4734 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558837 4734 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558841 4734 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558844 4734 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558848 4734 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558851 4734 feature_gate.go:330] unrecognized feature gate: 
ConsolePluginContentSecurityPolicy Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558855 4734 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558859 4734 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558862 4734 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558867 4734 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 09:27:59 crc kubenswrapper[4734]: W1125 09:27:59.558871 4734 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.558967 4734 flags.go:64] FLAG: --address="0.0.0.0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.558979 4734 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.558991 4734 flags.go:64] FLAG: --anonymous-auth="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.558998 4734 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559004 4734 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559009 4734 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559015 4734 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559021 4734 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559026 4734 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559031 4734 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559036 4734 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559067 4734 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559073 4734 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559077 4734 flags.go:64] FLAG: --cgroup-root="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559098 4734 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559103 4734 flags.go:64] FLAG: --client-ca-file="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559108 4734 flags.go:64] FLAG: --cloud-config="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559113 4734 flags.go:64] FLAG: --cloud-provider="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559117 4734 flags.go:64] FLAG: --cluster-dns="[]" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559128 4734 flags.go:64] FLAG: --cluster-domain="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559132 4734 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559136 4734 flags.go:64] FLAG: --config-dir="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559141 4734 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559146 4734 
flags.go:64] FLAG: --container-log-max-files="5" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559153 4734 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559157 4734 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559162 4734 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559167 4734 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559172 4734 flags.go:64] FLAG: --contention-profiling="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559176 4734 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559181 4734 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559185 4734 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559189 4734 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559195 4734 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559200 4734 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559204 4734 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559208 4734 flags.go:64] FLAG: --enable-load-reader="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559213 4734 flags.go:64] FLAG: --enable-server="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559217 4734 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559226 4734 flags.go:64] FLAG: --event-burst="100" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559231 4734 flags.go:64] FLAG: --event-qps="50" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559235 4734 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559239 4734 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559243 4734 flags.go:64] FLAG: --eviction-hard="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559249 4734 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559253 4734 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559260 4734 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559270 4734 flags.go:64] FLAG: --eviction-soft="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559276 4734 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559280 4734 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559285 4734 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559289 4734 flags.go:64] FLAG: --experimental-mounter-path="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559293 4734 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 25 09:27:59 crc 
kubenswrapper[4734]: I1125 09:27:59.559298 4734 flags.go:64] FLAG: --fail-swap-on="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559302 4734 flags.go:64] FLAG: --feature-gates="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559308 4734 flags.go:64] FLAG: --file-check-frequency="20s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559312 4734 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559317 4734 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559321 4734 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559326 4734 flags.go:64] FLAG: --healthz-port="10248" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559331 4734 flags.go:64] FLAG: --help="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559337 4734 flags.go:64] FLAG: --hostname-override="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559341 4734 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559346 4734 flags.go:64] FLAG: --http-check-frequency="20s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559351 4734 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559355 4734 flags.go:64] FLAG: --image-credential-provider-config="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559359 4734 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559364 4734 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559368 4734 flags.go:64] FLAG: --image-service-endpoint="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559372 4734 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559376 4734 flags.go:64] FLAG: --kube-api-burst="100" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559381 4734 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559386 4734 flags.go:64] FLAG: --kube-api-qps="50" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559391 4734 flags.go:64] FLAG: --kube-reserved="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559395 4734 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559400 4734 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559404 4734 flags.go:64] FLAG: --kubelet-cgroups="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559409 4734 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559415 4734 flags.go:64] FLAG: --lock-file="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559420 4734 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559424 4734 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559429 4734 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559440 4734 flags.go:64] FLAG: --log-json-split-stream="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559452 4734 
flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559457 4734 flags.go:64] FLAG: --log-text-split-stream="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559462 4734 flags.go:64] FLAG: --logging-format="text" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559466 4734 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559471 4734 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559475 4734 flags.go:64] FLAG: --manifest-url="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559479 4734 flags.go:64] FLAG: --manifest-url-header="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559486 4734 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559490 4734 flags.go:64] FLAG: --max-open-files="1000000" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559496 4734 flags.go:64] FLAG: --max-pods="110" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559500 4734 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559505 4734 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559509 4734 flags.go:64] FLAG: --memory-manager-policy="None" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559513 4734 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559518 4734 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559523 4734 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559527 4734 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559539 4734 flags.go:64] FLAG: --node-status-max-images="50" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559544 4734 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559549 4734 flags.go:64] FLAG: --oom-score-adj="-999" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559553 4734 flags.go:64] FLAG: --pod-cidr="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559557 4734 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559567 4734 flags.go:64] FLAG: --pod-manifest-path="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559572 4734 flags.go:64] FLAG: --pod-max-pids="-1" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559576 4734 flags.go:64] FLAG: --pods-per-core="0" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559581 4734 flags.go:64] FLAG: --port="10250" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559585 4734 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559590 4734 flags.go:64] FLAG: --provider-id="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559594 4734 flags.go:64] FLAG: --qos-reserved="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559599 4734 
flags.go:64] FLAG: --read-only-port="10255" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559604 4734 flags.go:64] FLAG: --register-node="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559608 4734 flags.go:64] FLAG: --register-schedulable="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559612 4734 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559621 4734 flags.go:64] FLAG: --registry-burst="10" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559626 4734 flags.go:64] FLAG: --registry-qps="5" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559630 4734 flags.go:64] FLAG: --reserved-cpus="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559639 4734 flags.go:64] FLAG: --reserved-memory="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559646 4734 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559650 4734 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559655 4734 flags.go:64] FLAG: --rotate-certificates="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559659 4734 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559663 4734 flags.go:64] FLAG: --runonce="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559667 4734 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559672 4734 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559676 4734 flags.go:64] FLAG: --seccomp-default="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559681 4734 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559685 4734 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559689 4734 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559693 4734 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559698 4734 flags.go:64] FLAG: --storage-driver-password="root" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559702 4734 flags.go:64] FLAG: --storage-driver-secure="false" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559706 4734 flags.go:64] FLAG: --storage-driver-table="stats" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559711 4734 flags.go:64] FLAG: --storage-driver-user="root" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559715 4734 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559720 4734 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559725 4734 flags.go:64] FLAG: --system-cgroups="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559729 4734 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559737 4734 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559741 4734 flags.go:64] FLAG: --tls-cert-file="" Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559745 
4734 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559754 4734 flags.go:64] FLAG: --tls-min-version=""
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559759 4734 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559763 4734 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559768 4734 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559772 4734 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559777 4734 flags.go:64] FLAG: --v="2"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559783 4734 flags.go:64] FLAG: --version="false"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559789 4734 flags.go:64] FLAG: --vmodule=""
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559795 4734 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.559799 4734 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.560253 4734 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.663999 4734 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.664339 4734 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.665416 4734 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.717207 4734 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.717331 4734 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
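
The certificate_store record above names the on-disk client cert/key bundle, and the certificate_manager records below derive an expiration (2026-02-24 05:52:08 UTC) and a rotation deadline from it. As a self-contained sketch, assuming only that a PEM bundle is readable at that path, this reproduces the expiry the manager logs:

// Sketch: print the subject and NotAfter of the first CERTIFICATE block in
// the kubelet's client certificate bundle (path taken from the record above).
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	raw, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-client-current.pem")
	if err != nil {
		log.Fatal(err)
	}
	// The bundle holds both cert and key; skip non-certificate PEM blocks.
	for block, rest := pem.Decode(raw); block != nil; block, rest = pem.Decode(rest) {
		if block.Type != "CERTIFICATE" {
			continue
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("subject=%s notAfter=%s\n", cert.Subject, cert.NotAfter)
		return
	}
	log.Fatal("no CERTIFICATE block found")
}
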
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.718937 4734 server.go:997] "Starting client certificate rotation"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.718960 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.722811 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-21 11:38:32.488725181 +0000 UTC
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.722955 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.781629 4734 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:27:59 crc kubenswrapper[4734]: E1125 09:27:59.785718 4734 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.787794 4734 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:27:59 crc kubenswrapper[4734]: I1125 09:27:59.832548 4734 log.go:25] "Validated CRI v1 runtime API"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.088039 4734 log.go:25] "Validated CRI v1 image API"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.090923 4734 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.108007 4734 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-09-23-28-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.108055 4734 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.127757 4734 manager.go:217] Machine: {Timestamp:2025-11-25 09:28:00.11963115 +0000 UTC m=+2.930093164 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:19343fc1-91cb-4eae-8f56-eacf25f0be5a BootID:313cb2d5-19b9-400d-8416-99d9919180d4 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:40:3c:bc Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:40:3c:bc Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:0b:9e:d8 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:66:60:90 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:d5:31:bb Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:85:b6:21 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:9e:64:c6:db:06:11 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:96:b2:de:04:a3:0b Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.128058 4734 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.128351 4734 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.128757 4734 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.128961 4734 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.129000 4734 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.129219 4734 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.129228 4734 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.129966 4734 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.129990 4734 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.131113 4734 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.131205 4734 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.140076 4734 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.140143 4734 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.140173 4734 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.140192 4734 kubelet.go:324] "Adding apiserver pod source"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.140210 4734 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.147791 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused
Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.147830 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused
Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.147958 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.148152 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.152714 4734 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.154275 4734 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.163184 4734 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167266 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167318 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167333 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167347 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167362 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167376 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167384 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167398 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167414 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167423 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167438 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167447 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.167479 4734 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.168982 4734 server.go:1280] "Started kubelet"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.169164 4734 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.169375 4734 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.171414 4734 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.171429 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused
Nov 25 09:28:00 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.172797 4734 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.174844 4734 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.174997 4734 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.175133 4734 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.175167 4734 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.175062 4734 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 06:01:40.311536335 +0000 UTC
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.176568 4734 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1244h33m40.13498618s for next certificate rotation
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.185203 4734 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.185322 4734 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.185910 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="200ms"
Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.185938 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused
Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.186120 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187314 4734 factory.go:153] Registering CRI-O factory
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187365 4734 factory.go:221] Registration of the crio container factory successfully
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187488 4734 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187515 4734 factory.go:55] Registering systemd factory
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187540 4734 factory.go:221] Registration of the systemd container factory successfully
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187574 4734 factory.go:103] Registering Raw factory
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.187604 4734 manager.go:1196] Started watching for new ooms in manager
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.188851 4734 manager.go:319] Starting recovery of all containers
Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.185873 4734 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.5:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b35d7d5dffd64 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:28:00.168148324 +0000 UTC m=+2.978610328,LastTimestamp:2025-11-25 09:28:00.168148324 +0000 UTC m=+2.978610328,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.194599 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.194657 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.194672 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197059 4734 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197126 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197140 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197156 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197171 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197183 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197201 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197218 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197235 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197253 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197264 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197279 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197296 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197307 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197322 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197334 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197343 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197353 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197363 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197373 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197383 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197395 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197431 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197441 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197466 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197479 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197491 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197504 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197515 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197526 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197539 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197552 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197564 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197576 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197593 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197609 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197620 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197631 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197643 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197654 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197664 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197673 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197689 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197701 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197741 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197752 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197769 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197780 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197791 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197802 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197818 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197828 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197839 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197851 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197868 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197878 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197889 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197903 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197912 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197930 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197983 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.197992 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198002 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198012 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198021 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198030 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198039 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198051 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198061 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198070 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198095 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198117 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198128 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198137 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198147 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198161 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198172 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198183 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198218 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198237 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198249 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198263 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198278 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198305 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198317 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198328 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198339 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198349 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198360 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198370 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198381 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198409 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198422 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198432 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198447 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198457 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198468 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198480 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198491 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198519 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198529 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198538 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198555 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198566 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198576 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198587 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198599 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198611 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198622 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198632 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198663 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198677 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198687 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198697 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198708 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198718 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198734 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198744 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198753 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198761 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198771 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198785 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198795 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198808 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198816 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198826 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198837 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198848 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198863 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198878 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198888 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198900 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198910 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198921 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198930 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198941 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198955 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4"
volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198969 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198981 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.198995 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199009 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199023 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199035 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199046 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199055 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199066 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199270 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199301 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" 
volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199312 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199322 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199332 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199345 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199354 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199370 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199381 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199394 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199406 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199416 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199432 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199443 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199453 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199465 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199476 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199489 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199499 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199512 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199523 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199535 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199546 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199557 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199568 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199580 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199590 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199610 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199620 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199631 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199641 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199655 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199666 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199676 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199687 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" 
seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199698 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199709 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199720 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199730 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199741 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199752 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199765 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199776 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199789 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199800 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199813 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" 
seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199823 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199859 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199872 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199883 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199897 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199907 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199919 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199929 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199940 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199951 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199967 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 25 09:28:00 crc 
kubenswrapper[4734]: I1125 09:28:00.199979 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.199993 4734 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.200003 4734 reconstruct.go:97] "Volume reconstruction finished" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.200022 4734 reconciler.go:26] "Reconciler: start to sync state" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.214015 4734 manager.go:324] Recovery completed Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.225271 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.227874 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.227937 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.227953 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.229101 4734 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.229135 4734 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.229169 4734 state_mem.go:36] "Initialized new in-memory state store" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.243311 4734 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.245598 4734 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.245638 4734 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.245670 4734 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.245770 4734 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.246922 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.246994 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.265248 4734 policy_none.go:49] "None policy: Start" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.268918 4734 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.268969 4734 state_mem.go:35] "Initializing new in-memory state store" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.286483 4734 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.340692 4734 manager.go:334] "Starting Device Plugin manager" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.340764 4734 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.340780 4734 server.go:79] "Starting device plugin registration server" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.341328 4734 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.341385 4734 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.341872 4734 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.342142 4734 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.342178 4734 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.345920 4734 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.346363 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.348596 4734 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.348632 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.348642 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.348843 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.349078 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.349148 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350038 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350113 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350135 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350360 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350437 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350466 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.350660 4734 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350854 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350897 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.350912 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.351306 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.351333 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.351343 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.351470 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.351646 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.351691 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352198 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352243 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352256 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352383 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352526 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352587 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352593 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352647 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.352662 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353269 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353299 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353300 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353310 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353333 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353389 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353760 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353786 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353797 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353816 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.353921 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.354830 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.354865 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.354878 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.386989 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="400ms" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402218 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402388 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402462 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402520 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402544 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402560 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 
09:28:00.402604 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402627 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402649 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402670 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402753 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402866 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.402975 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.403060 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.403124 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.441970 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 
09:28:00.443753 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.443808 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.443824 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.443857 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.444518 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.5:6443: connect: connection refused" node="crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504679 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504759 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504786 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504812 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504834 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504858 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504885 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504904 
4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504927 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504948 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504967 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504985 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.504993 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505104 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505023 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505173 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505173 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc 
kubenswrapper[4734]: I1125 09:28:00.505201 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505218 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505253 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505255 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505281 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505315 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505319 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505306 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505358 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505365 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 
09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505360 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505410 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.505340 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.645297 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.647023 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.647063 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.647077 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.647128 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.647799 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.5:6443: connect: connection refused" node="crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.706990 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.715878 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.742996 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.764987 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: I1125 09:28:00.778161 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.788369 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="800ms" Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.811216 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-0ba47875ae87fd58ae390daafc47bd268ecab9ad0cca8ba8515db8f5ac2ad62c WatchSource:0}: Error finding container 0ba47875ae87fd58ae390daafc47bd268ecab9ad0cca8ba8515db8f5ac2ad62c: Status 404 returned error can't find the container with id 0ba47875ae87fd58ae390daafc47bd268ecab9ad0cca8ba8515db8f5ac2ad62c Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.812271 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-6a5437a5eef07fa89fd63e2c7c6109b49e6cb8b5b81deb25c3a778d532ad01da WatchSource:0}: Error finding container 6a5437a5eef07fa89fd63e2c7c6109b49e6cb8b5b81deb25c3a778d532ad01da: Status 404 returned error can't find the container with id 6a5437a5eef07fa89fd63e2c7c6109b49e6cb8b5b81deb25c3a778d532ad01da Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.813408 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-22d1cfdc54aad1e83d41b70e1711157e4bb5744cb76f4449e64db66a6e1d8e5f WatchSource:0}: Error finding container 22d1cfdc54aad1e83d41b70e1711157e4bb5744cb76f4449e64db66a6e1d8e5f: Status 404 returned error can't find the container with id 22d1cfdc54aad1e83d41b70e1711157e4bb5744cb76f4449e64db66a6e1d8e5f Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.814902 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-be7c49f65ded3b3f89d485c5eb7da63a33a044893b81daa38aec1b11d30acbf0 WatchSource:0}: Error finding container be7c49f65ded3b3f89d485c5eb7da63a33a044893b81daa38aec1b11d30acbf0: Status 404 returned error can't find the container with id be7c49f65ded3b3f89d485c5eb7da63a33a044893b81daa38aec1b11d30acbf0 Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.815704 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-ad43b715ce4382fdabb878eb35529ef9c0195e54c13a98584383489bc938ce44 WatchSource:0}: Error finding container ad43b715ce4382fdabb878eb35529ef9c0195e54c13a98584383489bc938ce44: Status 404 returned error can't find the container with id ad43b715ce4382fdabb878eb35529ef9c0195e54c13a98584383489bc938ce44 Nov 25 09:28:00 crc kubenswrapper[4734]: W1125 09:28:00.974937 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:00 crc kubenswrapper[4734]: E1125 09:28:00.975037 4734 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:01 crc kubenswrapper[4734]: W1125 09:28:01.021437 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.021547 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.048042 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.049742 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.049792 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.049801 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.049830 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.050424 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.5:6443: connect: connection refused" node="crc" Nov 25 09:28:01 crc kubenswrapper[4734]: W1125 09:28:01.167057 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.167190 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.172915 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.251417 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"22d1cfdc54aad1e83d41b70e1711157e4bb5744cb76f4449e64db66a6e1d8e5f"} Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.254675 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0ba47875ae87fd58ae390daafc47bd268ecab9ad0cca8ba8515db8f5ac2ad62c"} Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.258158 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"be7c49f65ded3b3f89d485c5eb7da63a33a044893b81daa38aec1b11d30acbf0"} Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.260367 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"6a5437a5eef07fa89fd63e2c7c6109b49e6cb8b5b81deb25c3a778d532ad01da"} Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.262717 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ad43b715ce4382fdabb878eb35529ef9c0195e54c13a98584383489bc938ce44"} Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.589359 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="1.6s" Nov 25 09:28:01 crc kubenswrapper[4734]: W1125 09:28:01.772876 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.772998 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.839763 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.840963 4734 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.851447 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.855396 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.855458 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:01 crc kubenswrapper[4734]: I1125 09:28:01.855470 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:01 
crc kubenswrapper[4734]: I1125 09:28:01.855501 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:01 crc kubenswrapper[4734]: E1125 09:28:01.856180 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.5:6443: connect: connection refused" node="crc" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.172640 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.268461 4734 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d" exitCode=0 Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.268577 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.268552 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d"} Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.269512 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.269544 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.269555 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.271806 4734 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea" exitCode=0 Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.271880 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea"} Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.271949 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.272974 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.273017 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.273031 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.277240 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262"} Nov 25 09:28:02 
crc kubenswrapper[4734]: I1125 09:28:02.277275 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa"} Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.278699 4734 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="45d71c80add418897710d681473fef3b08895637c09afb7ba660d56c657bafb1" exitCode=0 Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.278767 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"45d71c80add418897710d681473fef3b08895637c09afb7ba660d56c657bafb1"} Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.278828 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.279669 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.279702 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.279716 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.280938 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626" exitCode=0 Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.280999 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.281002 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626"} Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.283993 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.284025 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.284037 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.287467 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.289453 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.289510 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:02 crc kubenswrapper[4734]: I1125 09:28:02.289528 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.172963 4734 
csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:03 crc kubenswrapper[4734]: E1125 09:28:03.191330 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="3.2s" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.286702 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.286691 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"46afdcaa43f5bc131b67fd12490703381f281179a070573c34a6334c5a75b684"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.287955 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.288010 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.288026 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.290544 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.290605 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.290730 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.292362 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.292394 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.292406 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.294362 4734 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="822a7e2d0e8d867227c57beb44889ba187db20b95786ce399780c4643ceb59ae" exitCode=0 Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.294430 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"822a7e2d0e8d867227c57beb44889ba187db20b95786ce399780c4643ceb59ae"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.294621 4734 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.295839 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.295866 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.295877 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.297966 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.298041 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.301860 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.301999 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.302019 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3"} Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.457024 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.459125 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.459192 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.459206 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:03 crc kubenswrapper[4734]: I1125 09:28:03.459245 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:03 crc kubenswrapper[4734]: E1125 09:28:03.459968 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.5:6443: connect: connection refused" node="crc" Nov 25 09:28:03 crc kubenswrapper[4734]: W1125 09:28:03.693941 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get 
"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:03 crc kubenswrapper[4734]: E1125 09:28:03.694036 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:03 crc kubenswrapper[4734]: W1125 09:28:03.787434 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:03 crc kubenswrapper[4734]: E1125 09:28:03.787580 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:04 crc kubenswrapper[4734]: W1125 09:28:04.051540 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:04 crc kubenswrapper[4734]: E1125 09:28:04.051755 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:04 crc kubenswrapper[4734]: W1125 09:28:04.095653 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:04 crc kubenswrapper[4734]: E1125 09:28:04.096075 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.172901 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.309774 4734 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3483df9a519c4fcd0498429f824886d2494009361e9e343db449859f76643132" exitCode=0 Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.309855 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3483df9a519c4fcd0498429f824886d2494009361e9e343db449859f76643132"} Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.310898 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.312687 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.312735 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.312747 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.315664 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e"} Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.315706 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5"} Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.315720 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed"} Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.315790 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.315800 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.315816 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.316056 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.317549 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.317586 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.317550 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.317627 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.317639 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.317602 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.318050 4734 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.318217 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.318467 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.318419 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.318718 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:04 crc kubenswrapper[4734]: I1125 09:28:04.318730 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.173219 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.322730 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9a96ca61bad3ffd3e116cbe0817e9c623c973aef2359c9d0f0b7920a93bfc2af"} Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.322795 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9f41aa3b10b4d4ecff44e59e358405a90883c5b6174683871f770bceb9a5c8cb"} Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.322814 4734 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.322868 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.323770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.323815 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:05 crc kubenswrapper[4734]: I1125 09:28:05.323864 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.173225 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.220597 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 25 09:28:06 crc kubenswrapper[4734]: E1125 09:28:06.221741 4734 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.5:6443: 
connect: connection refused" logger="UnhandledError" Nov 25 09:28:06 crc kubenswrapper[4734]: E1125 09:28:06.392376 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="6.4s" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.661043 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.662836 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.662890 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.662902 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.662933 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:06 crc kubenswrapper[4734]: E1125 09:28:06.663671 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.5:6443: connect: connection refused" node="crc" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.984738 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.984944 4734 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.985000 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.986484 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.986528 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:06 crc kubenswrapper[4734]: I1125 09:28:06.986543 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.173314 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.333627 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f3421413d83921060daac40edc6eb8c7a65aa395b583244bb9ea29096f5dc007"} Nov 25 09:28:07 crc kubenswrapper[4734]: W1125 09:28:07.512830 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:07 crc kubenswrapper[4734]: E1125 09:28:07.512911 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch 
*v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:07 crc kubenswrapper[4734]: W1125 09:28:07.696649 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:07 crc kubenswrapper[4734]: E1125 09:28:07.696763 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.713331 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.713656 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.716328 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.716401 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:07 crc kubenswrapper[4734]: I1125 09:28:07.716483 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:07 crc kubenswrapper[4734]: W1125 09:28:07.857754 4734 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:07 crc kubenswrapper[4734]: E1125 09:28:07.857848 4734 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.5:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.172314 4734 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.342680 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bd87fc40ad7a6d46bb59f40ee85b174e37807edd3289c92aa4f80a2a00404fa0"} Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.345553 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.349208 4734 generic.go:334] "Generic (PLEG): container finished" 
podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e" exitCode=255 Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.349377 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e"} Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.349914 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.351697 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.351742 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.351775 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.352600 4734 scope.go:117] "RemoveContainer" containerID="e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.757825 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.758064 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.759487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.759537 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.759552 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:08 crc kubenswrapper[4734]: I1125 09:28:08.771218 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.088825 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.158606 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.158821 4734 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.158898 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": dial tcp 192.168.126.11:6443: connect: connection refused" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.172865 4734 
csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.5:6443: connect: connection refused Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.357932 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"acc8f91cc8f19f9b701faf29428f3b33b8ef51b389d439e55af41b75b62e49f0"} Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.358041 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.358941 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.358983 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.358995 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.360480 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.362566 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999"} Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.362628 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.362702 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.362748 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.363478 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.363533 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.363549 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.363809 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.363859 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.363874 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:09 crc kubenswrapper[4734]: I1125 09:28:09.549487 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:10 crc 
kubenswrapper[4734]: E1125 09:28:10.350792 4734 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.365148 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.365231 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.365303 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.365232 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367032 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367106 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367125 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367243 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367274 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367284 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367296 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367347 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.367370 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.713894 4734 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 09:28:10 crc kubenswrapper[4734]: I1125 09:28:10.714014 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.367286 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.367513 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:11 
crc kubenswrapper[4734]: I1125 09:28:11.369253 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.369318 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.369281 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.369393 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.369418 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.369344 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:11 crc kubenswrapper[4734]: I1125 09:28:11.375774 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.369994 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.371123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.371155 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.371164 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.619630 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.619854 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.621170 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.621196 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.621205 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.738116 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.738362 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.739770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.739813 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:12 crc kubenswrapper[4734]: I1125 09:28:12.739825 4734 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:13 crc kubenswrapper[4734]: I1125 09:28:13.064870 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:13 crc kubenswrapper[4734]: I1125 09:28:13.066305 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:13 crc kubenswrapper[4734]: I1125 09:28:13.066345 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:13 crc kubenswrapper[4734]: I1125 09:28:13.066354 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:13 crc kubenswrapper[4734]: I1125 09:28:13.066377 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:14 crc kubenswrapper[4734]: I1125 09:28:14.430740 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 25 09:28:16 crc kubenswrapper[4734]: I1125 09:28:16.851037 4734 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 25 09:28:16 crc kubenswrapper[4734]: I1125 09:28:16.851142 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.269415 4734 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.269512 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.441300 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.441544 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.443073 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.443141 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.443154 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:18 crc kubenswrapper[4734]: I1125 09:28:18.479989 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-etcd/etcd-crc" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.090063 4734 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.090186 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.172736 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.172930 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.173253 4734 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.173387 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.174572 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.174626 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.174637 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.177972 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.405109 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.405150 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.405646 4734 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.405696 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.412188 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.412242 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.412252 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.412469 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.412518 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.412531 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:19 crc kubenswrapper[4734]: I1125 09:28:19.426268 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 25 09:28:20 crc kubenswrapper[4734]: E1125 09:28:20.378709 4734 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 09:28:20 crc kubenswrapper[4734]: I1125 09:28:20.406519 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:20 crc kubenswrapper[4734]: I1125 09:28:20.407701 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:20 crc kubenswrapper[4734]: I1125 09:28:20.407758 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:20 crc kubenswrapper[4734]: I1125 09:28:20.407778 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:20 crc kubenswrapper[4734]: I1125 09:28:20.714547 4734 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 09:28:20 crc kubenswrapper[4734]: I1125 09:28:20.714625 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.843458 4734 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.845445 4734 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.846536 4734 
reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.846677 4734 trace.go:236] Trace[16232548]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:28:09.665) (total time: 12181ms): Nov 25 09:28:21 crc kubenswrapper[4734]: Trace[16232548]: ---"Objects listed" error: 12181ms (09:28:21.846) Nov 25 09:28:21 crc kubenswrapper[4734]: Trace[16232548]: [12.181458632s] [12.181458632s] END Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.846728 4734 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 09:28:21 crc kubenswrapper[4734]: E1125 09:28:21.847186 4734 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.865212 4734 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.897130 4734 csr.go:261] certificate signing request csr-dqbm7 is approved, waiting to be issued Nov 25 09:28:21 crc kubenswrapper[4734]: I1125 09:28:21.910317 4734 csr.go:257] certificate signing request csr-dqbm7 is issued Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.153064 4734 apiserver.go:52] "Watching apiserver" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.160449 4734 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.160723 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.161240 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.161334 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.161409 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.161461 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.161568 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.161649 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.162060 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.162257 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.162317 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.164613 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.164666 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.164884 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.165037 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.165157 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.165298 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.165322 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.165070 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.165758 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.177028 4734 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.192283 4734 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.195493 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.211660 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.228871 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.248267 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.248541 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.248673 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.250815 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.250919 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.250993 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251062 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251174 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251263 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251335 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251404 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251477 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251595 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251711 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251780 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251856 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.251925 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252000 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252069 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252176 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252254 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252333 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252400 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252463 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252539 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252605 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252745 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252834 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.252964 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253066 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253199 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.249147 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.248862 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.249024 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.249229 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253012 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253493 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253803 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253807 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253980 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254152 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254278 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254287 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254310 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254301 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254442 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254464 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254524 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254461 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254545 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254607 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254730 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.253297 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254784 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254802 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254809 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254819 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254886 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254912 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254934 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254963 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.254987 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255005 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255007 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255022 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255043 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255061 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255104 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255126 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255163 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255254 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255288 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255312 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255318 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255349 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255377 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255401 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255428 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255429 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255456 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255500 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255522 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255531 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255695 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255731 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255755 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255780 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255805 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255829 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255786 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255841 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255855 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255891 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255899 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255923 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255954 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.255985 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256011 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256035 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256059 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256103 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256131 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256158 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256184 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256438 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256468 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256492 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256513 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256534 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256551 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256561 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256613 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256645 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256661 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256674 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256745 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256776 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256876 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256911 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256947 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256965 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256980 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.256971 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.257006 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.257031 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.257054 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.257100 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260547 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261458 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261533 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261682 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261732 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.262020 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.257112 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260471 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260664 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260847 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260940 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.269767 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.269878 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.269958 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260970 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260961 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261190 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261276 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261391 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261365 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.261505 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.262667 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.262876 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.263719 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.264157 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.264358 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.270033 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.264575 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.265296 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.265429 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.265491 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.265572 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.265991 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.266286 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.266371 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.266546 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.267887 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.268012 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.268438 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.268473 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.268806 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.269453 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.269513 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.269746 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.270381 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.270617 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.270629 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.270847 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.270904 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.268774 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.260902 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.271907 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.272046 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.272060 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274372 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274451 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274482 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274515 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274546 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274572 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274595 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274624 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274652 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274869 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274916 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274949 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.274981 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275022 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275054 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275106 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275143 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275172 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275208 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275241 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275271 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275299 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275334 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275367 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275388 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275410 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275431 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275449 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275471 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275494 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275517 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275536 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275557 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275575 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275593 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275649 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275670 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275691 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275708 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275728 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275748 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275764 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275783 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.275801 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276376 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276413 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276443 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276470 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276493 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276522 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276630 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276659 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276689 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276711 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276739 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276764 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276799 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276821 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276848 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276875 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276897 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276921 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276947 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.276974 4734
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277001 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277027 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277053 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277074 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277115 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277142 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277165 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277220 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.277577 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: 
\"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.279308 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.279347 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281659 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281734 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281774 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281807 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281842 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281874 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281910 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281939 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281970 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282001 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282030 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282315 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282409 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282441 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282472 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282501 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282530 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282603 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: 
\"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282640 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282676 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282704 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282739 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282780 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282812 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282847 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282879 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282911 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282940 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282968 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283001 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283033 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283155 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283175 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283191 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283208 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.280187 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.280772 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.280864 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281183 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.281819 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283802 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282262 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282577 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282653 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282674 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282682 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.282979 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283037 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283271 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.283373 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285185 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285298 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285477 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285524 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285681 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285694 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.285943 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.286203 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.286358 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.286368 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.286910 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287008 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287112 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287361 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287451 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287474 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287484 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.286857 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287815 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287773 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.287872 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.288025 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.288294 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.288362 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.288944 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.289304 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.283507 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:28:22.783478574 +0000 UTC m=+25.593940748 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.289399 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.289505 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:22.789428132 +0000 UTC m=+25.599890116 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.289531 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.289524 4734 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.289651 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:22.789617397 +0000 UTC m=+25.600079391 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283548 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283728 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283898 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.290103 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.290153 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.290252 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.290410 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.290436 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.291545 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.291910 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.283223 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293686 4734 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293712 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293731 4734 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293746 4734 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293759 4734 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293773 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293788 4734 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293803 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293817 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293830 4734 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.293843 4734 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294546 4734 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294571 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294609 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294625 4734 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294638 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294651 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294663 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294677 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294692 4734 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294706 4734 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294720 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294736 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294750 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294763 4734 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294776 4734 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294791 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294805 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294819 4734 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294835 4734 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294848 4734 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294860 4734 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294873 4734 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294885 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294898 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: 
\"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294909 4734 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294922 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294935 4734 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294948 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294960 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294973 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.294985 4734 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295000 4734 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295011 4734 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295024 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295037 4734 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295051 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295064 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295096 4734 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295110 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295121 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295132 4734 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295145 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295157 4734 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295170 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295181 4734 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295194 4734 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295215 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295232 4734 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295249 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295262 4734 reconciler_common.go:293] "Volume detached for volume 
\"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295275 4734 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295288 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295300 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295312 4734 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295324 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295337 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295350 4734 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295363 4734 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295377 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295391 4734 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.295404 4734 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.302659 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.302708 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.302728 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.302808 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:22.8027808 +0000 UTC m=+25.613242794 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.302639 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.316635 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.316681 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.316699 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.316780 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:22.816755188 +0000 UTC m=+25.627217182 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.317995 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.318071 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.318223 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.318154 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.318551 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.319019 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.319658 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.320531 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.321077 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.321443 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.323537 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.323735 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.323911 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.325370 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.325436 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.326186 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.327354 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.328261 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.328420 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.328578 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.328574 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.328725 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.329070 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.329206 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.328209 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.329380 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.329404 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.338668 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.339235 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.339388 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.339722 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.339854 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). 
InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.339951 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.339955 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.340219 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.340406 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.340736 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.347327 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.348135 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.348150 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.348936 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.351351 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.351481 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.350366 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.352494 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.353170 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.353423 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). 
InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.353501 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.354049 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.354361 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.354487 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.355675 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.355921 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.356857 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.356935 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.360708 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.371048 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.371172 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378019 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378061 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378203 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378398 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378551 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378630 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.378956 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.379360 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.379492 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.379689 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.391206 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.395983 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396025 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396120 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396135 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396145 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396154 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396163 4734 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396174 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396184 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396195 4734 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396205 4734 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396215 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396226 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396236 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396247 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396258 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396268 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396279 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396289 4734 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396299 4734 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396316 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396326 4734 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396338 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396349 4734 
reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396360 4734 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396371 4734 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396383 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396394 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396405 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396416 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396428 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396437 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396448 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396459 4734 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396468 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396477 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node 
\"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396487 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396498 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396507 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396517 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396527 4734 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396536 4734 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396548 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396556 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396566 4734 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396576 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396586 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396595 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396605 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: 
\"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396615 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396624 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396634 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396646 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396655 4734 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396665 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396675 4734 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396685 4734 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396695 4734 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396705 4734 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396715 4734 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396725 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396735 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396749 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396760 4734 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396771 4734 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396781 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396793 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396803 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396814 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396824 4734 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396835 4734 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396845 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396855 4734 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396866 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396877 4734 reconciler_common.go:293] 
"Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396887 4734 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396898 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396909 4734 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396920 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396930 4734 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396945 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396955 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396965 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396976 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.396992 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397002 4734 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397012 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397023 4734 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397033 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397044 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397054 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397065 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397075 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397104 4734 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397114 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397124 4734 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397142 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397152 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397163 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397173 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397183 4734 reconciler_common.go:293] "Volume 
detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397193 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397205 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397215 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397224 4734 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397233 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397243 4734 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397254 4734 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397268 4734 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397280 4734 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397290 4734 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397301 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397603 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.397630 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.400276 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.401742 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.404495 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.404584 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.404640 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.404903 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.417377 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.418156 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.422559 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999" exitCode=255 Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.422622 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999"} Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.422707 4734 scope.go:117] "RemoveContainer" containerID="e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.424502 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.438653 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-sqcpf"] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.438981 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-qhhvk"] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.439184 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.439511 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.440435 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.443010 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.446715 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.446919 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.446976 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.447062 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.447073 4734 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"image-registry-certificates" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.446985 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.480632 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.489255 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.496659 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498849 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5k7x\" (UniqueName: \"kubernetes.io/projected/4ce04403-9506-4775-83ce-62ced0a6f576-kube-api-access-p5k7x\") pod \"node-resolver-sqcpf\" (UID: \"4ce04403-9506-4775-83ce-62ced0a6f576\") " pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498880 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d67566fa-8990-4e98-93f5-b43f2bada700-serviceca\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498905 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4ce04403-9506-4775-83ce-62ced0a6f576-hosts-file\") pod \"node-resolver-sqcpf\" (UID: \"4ce04403-9506-4775-83ce-62ced0a6f576\") " pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498922 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d67566fa-8990-4e98-93f5-b43f2bada700-host\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498936 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzws6\" (UniqueName: \"kubernetes.io/projected/d67566fa-8990-4e98-93f5-b43f2bada700-kube-api-access-gzws6\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498959 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498970 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498979 4734 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498987 4734 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.498995 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.499005 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.499013 4734 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.507155 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.549473 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.572640 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.593784 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.600159 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d67566fa-8990-4e98-93f5-b43f2bada700-serviceca\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.600507 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4ce04403-9506-4775-83ce-62ced0a6f576-hosts-file\") pod \"node-resolver-sqcpf\" (UID: \"4ce04403-9506-4775-83ce-62ced0a6f576\") " pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.600673 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d67566fa-8990-4e98-93f5-b43f2bada700-host\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.600786 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzws6\" (UniqueName: \"kubernetes.io/projected/d67566fa-8990-4e98-93f5-b43f2bada700-kube-api-access-gzws6\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.600928 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5k7x\" (UniqueName: \"kubernetes.io/projected/4ce04403-9506-4775-83ce-62ced0a6f576-kube-api-access-p5k7x\") pod \"node-resolver-sqcpf\" (UID: \"4ce04403-9506-4775-83ce-62ced0a6f576\") " pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.602135 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4ce04403-9506-4775-83ce-62ced0a6f576-hosts-file\") pod \"node-resolver-sqcpf\" (UID: \"4ce04403-9506-4775-83ce-62ced0a6f576\") " pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.602237 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d67566fa-8990-4e98-93f5-b43f2bada700-host\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 
crc kubenswrapper[4734]: I1125 09:28:22.603387 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d67566fa-8990-4e98-93f5-b43f2bada700-serviceca\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.631175 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.633047 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzws6\" (UniqueName: \"kubernetes.io/projected/d67566fa-8990-4e98-93f5-b43f2bada700-kube-api-access-gzws6\") pod \"node-ca-qhhvk\" (UID: \"d67566fa-8990-4e98-93f5-b43f2bada700\") " pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.644990 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5k7x\" (UniqueName: \"kubernetes.io/projected/4ce04403-9506-4775-83ce-62ced0a6f576-kube-api-access-p5k7x\") pod \"node-resolver-sqcpf\" (UID: \"4ce04403-9506-4775-83ce-62ced0a6f576\") " pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 
crc kubenswrapper[4734]: I1125 09:28:22.660343 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.665563 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.665643 4734 scope.go:117] "RemoveContainer" containerID="01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.665894 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.683735 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.699269 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.715669 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.729108 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.742393 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.751912 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.759498 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-qhhvk" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.766966 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.769465 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-sqcpf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.802510 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.802584 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.802643 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.802731 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.802789 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:23.802771349 +0000 UTC m=+26.613233343 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.802857 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:28:23.802848321 +0000 UTC m=+26.613310315 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.802939 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.802973 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:23.802964485 +0000 UTC m=+26.613426479 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.803219 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-2n2f8"] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.803685 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.808535 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.808586 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.808700 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.808865 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.809133 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.819260 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.832193 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.844384 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.854320 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.872944 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.887577 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.903336 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.903390 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.903538 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.903714 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.903732 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.903817 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:23.903802596 +0000 UTC m=+26.714264590 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.904109 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.904128 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.904137 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:22 crc kubenswrapper[4734]: E1125 09:28:22.904209 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:28:23.904174057 +0000 UTC m=+26.714636051 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.905124 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:07Z\\\",\\\"message\\\":\\\"W1125 09:28:05.348573 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:28:05.349859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764062885 cert, and key in /tmp/serving-cert-3163147414/serving-signer.crt, /tmp/serving-cert-3163147414/serving-signer.key\\\\nI1125 09:28:06.784929 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:28:06.815028 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:28:06.815269 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:06.906295 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3163147414/tls.crt::/tmp/serving-cert-3163147414/tls.key\\\\\\\"\\\\nF1125 09:28:07.426455 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 
4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.911911 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-25 09:23:21 +0000 UTC, rotation deadline is 2026-08-08 13:02:44.650716056 +0000 UTC Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.911989 4734 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6147h34m21.738730746s for next certificate rotation Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.917642 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.929099 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:22 crc kubenswrapper[4734]: I1125 09:28:22.938230 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.004398 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-rootfs\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.004444 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-proxy-tls\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.004482 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-mcd-auth-proxy-config\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.004510 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wbjj\" (UniqueName: \"kubernetes.io/projected/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-kube-api-access-4wbjj\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.104929 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wbjj\" (UniqueName: \"kubernetes.io/projected/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-kube-api-access-4wbjj\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.105003 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-rootfs\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.105034 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-proxy-tls\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.105068 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-mcd-auth-proxy-config\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.105143 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-rootfs\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.105914 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-mcd-auth-proxy-config\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.109754 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-proxy-tls\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.123235 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wbjj\" (UniqueName: \"kubernetes.io/projected/b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1-kube-api-access-4wbjj\") pod \"machine-config-daemon-2n2f8\" (UID: \"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\") " pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.187603 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-7t7mh"] Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.188003 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-jg2nq"] Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.188214 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.190139 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.191229 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.191242 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.191404 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.191568 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.191625 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.192285 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.193635 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2vvjj"] Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.197318 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.197857 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.202492 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.202506 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.202711 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.203677 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.203697 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.204599 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205053 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205532 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-kubelet\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205579 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-etc-kubernetes\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205603 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-cni-multus\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205633 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhfm8\" (UniqueName: \"kubernetes.io/projected/80259512-c4ac-4362-b21e-386796e31645-kube-api-access-lhfm8\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205658 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-system-cni-dir\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205692 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-netns\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205715 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205739 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-bin\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205764 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-ovn-kubernetes\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205796 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-kubelet\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205816 4734 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-systemd-units\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205837 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-ovn\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205859 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-config\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205903 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/80259512-c4ac-4362-b21e-386796e31645-cni-binary-copy\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.205941 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/80259512-c4ac-4362-b21e-386796e31645-multus-daemon-config\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206002 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-multus-certs\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206074 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-systemd\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206124 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-cni-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206148 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-cnibin\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206170 4734 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4da62509-e117-444b-9f78-c5c9e52b1b87-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206191 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-slash\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206225 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-netd\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206246 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovn-node-metrics-cert\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206269 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-script-lib\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206292 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-socket-dir-parent\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206313 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-cni-bin\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206392 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-node-log\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206429 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2vvjj\" (UID: 
\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206465 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-cnibin\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206490 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-k8s-cni-cncf-io\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206513 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-netns\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206539 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206559 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bshnh\" (UniqueName: \"kubernetes.io/projected/4da62509-e117-444b-9f78-c5c9e52b1b87-kube-api-access-bshnh\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206579 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-env-overrides\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206599 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-system-cni-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206617 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-os-release\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206635 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-hostroot\") pod 
\"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206675 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-var-lib-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206701 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-etc-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206716 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-log-socket\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206729 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-os-release\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206745 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4da62509-e117-444b-9f78-c5c9e52b1b87-cni-binary-copy\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206762 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fvtk\" (UniqueName: \"kubernetes.io/projected/cbdcaaef-9e1d-421f-b1fa-05223f0067af-kube-api-access-9fvtk\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.206775 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-conf-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.227426 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.243360 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.259065 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.270128 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.279530 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.288953 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.301124 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308121 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-netns\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308164 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308197 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-bin\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308221 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-ovn-kubernetes\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308244 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-systemd-units\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308263 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-ovn\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308283 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-config\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308311 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-kubelet\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308332 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/80259512-c4ac-4362-b21e-386796e31645-cni-binary-copy\") pod \"multus-7t7mh\" (UID: 
\"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308352 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/80259512-c4ac-4362-b21e-386796e31645-multus-daemon-config\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308372 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-multus-certs\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308397 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-systemd\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308411 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-cni-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308438 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-cnibin\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308453 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4da62509-e117-444b-9f78-c5c9e52b1b87-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308468 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-slash\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308483 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-netd\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308500 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovn-node-metrics-cert\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 
09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308527 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-script-lib\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308544 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-socket-dir-parent\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308560 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-cni-bin\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308574 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-netns\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308587 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-node-log\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308602 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308620 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-cnibin\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308635 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-k8s-cni-cncf-io\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308651 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-os-release\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308665 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"hostroot\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-hostroot\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308682 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308699 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bshnh\" (UniqueName: \"kubernetes.io/projected/4da62509-e117-444b-9f78-c5c9e52b1b87-kube-api-access-bshnh\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308714 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-env-overrides\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308745 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-system-cni-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308772 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-etc-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308785 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-log-socket\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308800 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-var-lib-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308814 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-os-release\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308828 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/4da62509-e117-444b-9f78-c5c9e52b1b87-cni-binary-copy\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308845 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fvtk\" (UniqueName: \"kubernetes.io/projected/cbdcaaef-9e1d-421f-b1fa-05223f0067af-kube-api-access-9fvtk\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308858 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-conf-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308873 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-kubelet\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308888 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-etc-kubernetes\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308902 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-cni-multus\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308916 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhfm8\" (UniqueName: \"kubernetes.io/projected/80259512-c4ac-4362-b21e-386796e31645-kube-api-access-lhfm8\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308931 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-system-cni-dir\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.308999 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-system-cni-dir\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309033 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-netns\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309053 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309072 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-bin\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309120 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-ovn-kubernetes\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309141 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-systemd-units\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309171 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-ovn\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309719 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-config\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.309754 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-kubelet\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310248 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/80259512-c4ac-4362-b21e-386796e31645-cni-binary-copy\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310630 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/80259512-c4ac-4362-b21e-386796e31645-multus-daemon-config\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " 
pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310660 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-var-lib-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310674 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-kubelet\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310689 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-conf-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310725 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-hostroot\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310809 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-etc-kubernetes\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310853 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-etc-openvswitch\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.310885 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-os-release\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311067 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-netns\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311186 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-cni-bin\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311139 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-socket-dir-parent\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311235 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-node-log\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311239 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-cnibin\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311198 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-log-socket\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311277 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-cnibin\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311285 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-slash\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311261 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-k8s-cni-cncf-io\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311319 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-systemd\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311339 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-multus-cni-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311358 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-run-multus-certs\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 
09:28:23.311379 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-netd\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311550 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-env-overrides\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311727 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4da62509-e117-444b-9f78-c5c9e52b1b87-cni-binary-copy\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311734 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311894 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/4da62509-e117-444b-9f78-c5c9e52b1b87-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.311918 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-host-var-lib-cni-multus\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.312072 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-script-lib\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.312171 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-system-cni-dir\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.312276 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/80259512-c4ac-4362-b21e-386796e31645-os-release\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.312483 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/4da62509-e117-444b-9f78-c5c9e52b1b87-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.314716 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:07Z\\\",\\\"message\\\":\\\"W1125 09:28:05.348573 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:28:05.349859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764062885 cert, and key in /tmp/serving-cert-3163147414/serving-signer.crt, /tmp/serving-cert-3163147414/serving-signer.key\\\\nI1125 09:28:06.784929 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:28:06.815028 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:28:06.815269 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:06.906295 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3163147414/tls.crt::/tmp/serving-cert-3163147414/tls.key\\\\\\\"\\\\nF1125 09:28:07.426455 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 
requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.
126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.315378 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovn-node-metrics-cert\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.329641 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.331553 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhfm8\" (UniqueName: \"kubernetes.io/projected/80259512-c4ac-4362-b21e-386796e31645-kube-api-access-lhfm8\") pod \"multus-7t7mh\" (UID: \"80259512-c4ac-4362-b21e-386796e31645\") " pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.331768 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fvtk\" (UniqueName: \"kubernetes.io/projected/cbdcaaef-9e1d-421f-b1fa-05223f0067af-kube-api-access-9fvtk\") pod \"ovnkube-node-2vvjj\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.332756 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bshnh\" (UniqueName: \"kubernetes.io/projected/4da62509-e117-444b-9f78-c5c9e52b1b87-kube-api-access-bshnh\") pod \"multus-additional-cni-plugins-jg2nq\" (UID: \"4da62509-e117-444b-9f78-c5c9e52b1b87\") " pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.340980 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.353241 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.365463 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4a52626153a9cfebdc00f0f56350176ea353b2fe89ca4fe7e26a8ae96ffbc2e\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:07Z\\\",\\\"message\\\":\\\"W1125 09:28:05.348573 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:28:05.349859 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764062885 cert, and key in /tmp/serving-cert-3163147414/serving-signer.crt, /tmp/serving-cert-3163147414/serving-signer.key\\\\nI1125 09:28:06.784929 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:28:06.815028 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:28:06.815269 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:06.906295 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3163147414/tls.crt::/tmp/serving-cert-3163147414/tls.key\\\\\\\"\\\\nF1125 09:28:07.426455 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.376870 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.390763 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.418332 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.419424 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.427444 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.427529 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"3f6cceaa0db7254c40bf95086ccd6304c12cfff9f7e54b500cc5d7df1bba223a"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.434505 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.439074 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.454628 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.454795 4734 scope.go:117] "RemoveContainer" containerID="01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999" Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.455389 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.457477 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-qhhvk" event={"ID":"d67566fa-8990-4e98-93f5-b43f2bada700","Type":"ContainerStarted","Data":"24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.457531 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-qhhvk" event={"ID":"d67566fa-8990-4e98-93f5-b43f2bada700","Type":"ContainerStarted","Data":"c9a71c5ad5c9faffd79b6c53f0e60fab4a94ac507581e0074fd395b40f1486fe"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.473305 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.473366 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.473386 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fac84f82ff4ec8a958661a42f59d3e851f4fda4100900fd9fa36903639018934"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.474775 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"6354ad1819dd7682b9d03d41b7337247f69906ab45e35d1116b3f18989b3a436"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.476729 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-sqcpf" event={"ID":"4ce04403-9506-4775-83ce-62ced0a6f576","Type":"ContainerStarted","Data":"066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.476779 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-sqcpf" event={"ID":"4ce04403-9506-4775-83ce-62ced0a6f576","Type":"ContainerStarted","Data":"faf92a9f49b8363f2c7a65b04f42f3656c2143a977baaca336624f3f395b6585"} Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.479761 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.493228 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.503066 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.511271 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-7t7mh" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.514637 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.517396 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" Nov 25 09:28:23 crc kubenswrapper[4734]: W1125 09:28:23.521634 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80259512_c4ac_4362_b21e_386796e31645.slice/crio-e9c3ac1ae1ffbc77a68c9af61ee1353060d6cb327a656a6cf0e094434dbc196b WatchSource:0}: Error finding container e9c3ac1ae1ffbc77a68c9af61ee1353060d6cb327a656a6cf0e094434dbc196b: Status 404 returned error can't find the container with id e9c3ac1ae1ffbc77a68c9af61ee1353060d6cb327a656a6cf0e094434dbc196b Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.524654 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.526520 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.537958 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\"
,\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\
\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.556066 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.564559 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.582351 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.596017 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.608587 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.619435 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.639507 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn
-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: 
I1125 09:28:23.655074 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\
"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restar
tCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.671787 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.704273 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.743202 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.789588 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.815496 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.815836 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:28:25.815794116 +0000 UTC m=+28.626256130 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.816188 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.816222 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.816376 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.816440 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:28:25.816422475 +0000 UTC m=+28.626884469 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.816718 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.816753 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:25.816745654 +0000 UTC m=+28.627207648 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.824908 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.861820 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection 
refused" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.916977 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:23 crc kubenswrapper[4734]: I1125 09:28:23.917043 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917255 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917279 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917294 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917355 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:25.917335378 +0000 UTC m=+28.727797372 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917771 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917790 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917800 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:23 crc kubenswrapper[4734]: E1125 09:28:23.917828 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:25.917819122 +0000 UTC m=+28.728281116 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.246815 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.246903 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.247010 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:24 crc kubenswrapper[4734]: E1125 09:28:24.247000 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:24 crc kubenswrapper[4734]: E1125 09:28:24.247179 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:24 crc kubenswrapper[4734]: E1125 09:28:24.247394 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.251407 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.252269 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.253479 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.254270 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.255470 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.256120 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.256721 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.257664 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.258295 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.259253 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.259771 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.260864 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.261389 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.261878 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.262735 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.263257 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.264400 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.264890 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.265530 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.266525 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.266993 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.267958 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.268450 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.269451 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.269989 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.270886 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" 
path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.272367 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.272848 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.273858 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.274389 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.275224 4734 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.275326 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.276990 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.278109 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.278568 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.280472 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.281322 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.282417 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.283202 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.284329 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.284851 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.286066 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.286773 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.287892 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.288451 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.289549 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.290333 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.291597 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.292175 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.293213 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.293710 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.294714 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.295346 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.295876 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" 
path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.481636 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122" exitCode=0 Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.481733 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.481776 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"931bde0760310df67498c1541e9070111f4471c0e8a1ee56c2291367dd19dace"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.484283 4734 generic.go:334] "Generic (PLEG): container finished" podID="4da62509-e117-444b-9f78-c5c9e52b1b87" containerID="eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306" exitCode=0 Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.484389 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerDied","Data":"eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.484488 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerStarted","Data":"e7ca9c81fda324b0743f052164ac46270122ff0d6b7da0506023e1e6051562ea"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.485961 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerStarted","Data":"748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.486003 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerStarted","Data":"e9c3ac1ae1ffbc77a68c9af61ee1353060d6cb327a656a6cf0e094434dbc196b"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.487680 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.487716 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.487727 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"6bf497efa52d4b65b1a411167b31a2012060612936978a2dc642b1e6fec94f8e"} Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 
09:28:24.508912 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodIn
itializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount
\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.536534 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.551773 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.569355 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.587984 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.604977 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.620205 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.632887 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.649118 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.670849 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.687608 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.709622 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc 
kubenswrapper[4734]: I1125 09:28:24.727541 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.751711 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\
\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\
",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.774022 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc 
kubenswrapper[4734]: I1125 09:28:24.790751 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.809622 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.826634 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.841919 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.856944 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.871847 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.888706 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.904710 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.919588 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":
\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.933980 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:24 crc kubenswrapper[4734]: I1125 09:28:24.950795 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:24Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.496303 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.496473 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.496489 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.496498 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.496507 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.498564 4734 generic.go:334] "Generic (PLEG): container finished" podID="4da62509-e117-444b-9f78-c5c9e52b1b87" containerID="4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db" exitCode=0 Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.498703 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerDied","Data":"4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.499883 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998"} Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.518882 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.554580 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.577148 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.601841 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.618536 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.639136 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.656183 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.672491 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.686284 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.701293 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.723720 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.746158 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.766231 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.785145 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.815225 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.831563 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.839303 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.839546 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:28:29.839508699 +0000 UTC m=+32.649970703 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.839663 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.839706 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.839856 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.840014 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:29.839954992 +0000 UTC m=+32.650416986 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.839862 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.840130 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:29.840107227 +0000 UTC m=+32.650569221 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.847396 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.865474 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.882737 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.900789 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.915205 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.934316 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.940957 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:25 crc 
kubenswrapper[4734]: I1125 09:28:25.941019 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941223 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941277 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941241 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941293 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941316 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941334 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941366 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:29.94134536 +0000 UTC m=+32.751807404 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:25 crc kubenswrapper[4734]: E1125 09:28:25.941397 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:29.941379501 +0000 UTC m=+32.751841495 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.953120 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.965424 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:25 crc kubenswrapper[4734]: I1125 09:28:25.990933 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:25Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.008338 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.246229 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.246318 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.246404 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:26 crc kubenswrapper[4734]: E1125 09:28:26.246615 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:26 crc kubenswrapper[4734]: E1125 09:28:26.246832 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:26 crc kubenswrapper[4734]: E1125 09:28:26.246738 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.506876 4734 generic.go:334] "Generic (PLEG): container finished" podID="4da62509-e117-444b-9f78-c5c9e52b1b87" containerID="da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1" exitCode=0 Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.507094 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerDied","Data":"da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1"} Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.512924 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583"} Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.531188 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.551285 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.572147 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.590433 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.610988 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.626610 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.641333 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.655172 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.668372 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.683943 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.696688 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.712282 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:26 crc kubenswrapper[4734]: I1125 09:28:26.728563 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.523505 4734 generic.go:334] "Generic (PLEG): container finished" podID="4da62509-e117-444b-9f78-c5c9e52b1b87" containerID="ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396" exitCode=0 Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.523597 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerDied","Data":"ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396"} Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.548944 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.577828 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.596686 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.616027 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.639395 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.662172 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-bina
ry-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.687562 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.704450 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.721204 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.737073 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.754805 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.769680 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.772557 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.777144 4734 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.784944 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.785006 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.803717 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.827832 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb 
sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath
\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\
\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:
28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.848831 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-ku
be-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.867374 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.889746 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.904093 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.917276 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.928542 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.942776 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.959974 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.973770 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:27 crc kubenswrapper[4734]: I1125 09:28:27.988236 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.002394 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.245993 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.246006 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.246532 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.246653 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.246120 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.247026 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.268610 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.269608 4734 scope.go:117] "RemoveContainer" containerID="01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999" Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.269833 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.530829 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerStarted","Data":"ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7"} Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.548416 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"n
ame\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.562321 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.576624 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.590485 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.609484 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.627348 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{
\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.646735 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28
:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.669793 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.690961 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.706276 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.725510 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.742017 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.756230 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.771826 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.848346 4734 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.850884 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.850942 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.850956 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.851156 4734 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.864905 4734 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.865182 4734 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.870272 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.870349 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc 
kubenswrapper[4734]: I1125 09:28:28.870365 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.870388 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.870405 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:28Z","lastTransitionTime":"2025-11-25T09:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.885996 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.892573 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.892791 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.892864 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.892932 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.892992 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:28Z","lastTransitionTime":"2025-11-25T09:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.907910 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.913585 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.913632 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.913645 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.913663 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.913678 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:28Z","lastTransitionTime":"2025-11-25T09:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.928549 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.932565 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.932609 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.932618 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.932638 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.932649 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:28Z","lastTransitionTime":"2025-11-25T09:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.945471 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.949710 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.949772 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.949785 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.949808 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.949822 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:28Z","lastTransitionTime":"2025-11-25T09:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.964897 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:28Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:28 crc kubenswrapper[4734]: E1125 09:28:28.965129 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.967843 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
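[Editor's note: every failed status patch above dies on the same root cause: the serving certificate of the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-25. A minimal Go probe such as the sketch below (not part of this log; the address and the diagnosis come from the error text, the timeout is an assumption) can confirm the certificate's validity window from the node itself.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Webhook endpoint copied from the failed Post in the log above.
	addr := "127.0.0.1:9743"
	// InsecureSkipVerify is deliberate: we want to read the expired
	// certificate, not validate it.
	conn, err := tls.DialWithDialer(&net.Dialer{Timeout: 5 * time.Second},
		"tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial %s: %v", addr, err)
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("certificate is expired, matching the x509 error in the log")
	}
}

End of editor's note.]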
event="NodeHasSufficientMemory" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.967893 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.967906 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.967926 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:28 crc kubenswrapper[4734]: I1125 09:28:28.967943 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:28Z","lastTransitionTime":"2025-11-25T09:28:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.076133 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.076186 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.076200 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.076220 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.076234 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.181552 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.181619 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.181632 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.181653 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.181668 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.284600 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.284680 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.284694 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.284717 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.284735 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.387610 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.387652 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.387662 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.387678 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.387689 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.490102 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.490149 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.490159 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.490176 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.490187 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.539484 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e"}
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.592802 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.592844 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.592855 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.592873 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.592887 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.695052 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.695120 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.695134 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.695168 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.695196 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
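[Editor's note: the NodeNotReady condition keeps repeating because nothing has yet written a network configuration into /etc/kubernetes/cni/net.d/; the ovnkube-node container whose ContainerStarted event appears just above is what eventually provides it. The sketch below (not part of this log) mirrors the directory check behind the "no CNI configuration file" message; the accepted extensions are the ones libcni conventionally loads, which is an assumption here, not something stated in the log.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the kubelet's NetworkPluginNotReady message.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		return
	}
	found := false
	for _, e := range entries {
		// .conf, .conflist and .json are the extensions libcni scans for.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config present:", e.Name())
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file; the node will stay NotReady")
	}
}

End of editor's note.]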
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.719194 4734 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.798444 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.798494 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.798504 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.798521 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.798532 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.884211 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.884490 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:28:37.884460973 +0000 UTC m=+40.694922967 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
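[Editor's note: the TearDown failure above means the kubevirt.io.hostpath-provisioner plugin had not yet re-registered with this kubelet after the restart; the operation is retried after the backoff. As a cluster-level cross-check, a sketch like the one below lists the CSIDriver objects via client-go. This is an assumption-laden diagnostic, not kubelet code: the kubeconfig path is a guess for this host, and node-local plugin registration is a separate mechanism (sockets under /var/lib/kubelet/plugins_registry/), so a driver can exist as an API object while still being unregistered on the node.

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Kubeconfig path is an assumption; adjust for the host at hand.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	drivers, err := cs.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range drivers.Items {
		fmt.Println("CSIDriver:", d.Name)
	}
}

End of editor's note.]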
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.884554 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.884582 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.884704 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.884768 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:37.884752902 +0000 UTC m=+40.695214896 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.884760 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.884874 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:37.884847604 +0000 UTC m=+40.695309678 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.901671 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.901740 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.901760 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.901788 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.901806 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:29Z","lastTransitionTime":"2025-11-25T09:28:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.985488 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.985762 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.985831 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.985847 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:29 crc kubenswrapper[4734]: I1125 09:28:29.985788 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.985925 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:28:37.985902652 +0000 UTC m=+40.796364646 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.986328 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.986381 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.986400 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:29 crc kubenswrapper[4734]: E1125 09:28:29.986486 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:37.986462149 +0000 UTC m=+40.796924343 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.005006 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.005057 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.005067 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.005122 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.005139 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.107971 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.108019 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.108031 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.108048 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.108059 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.210429 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.210470 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.210480 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.210495 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.210506 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.247318 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.247452 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.247492 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:30 crc kubenswrapper[4734]: E1125 09:28:30.247552 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:30 crc kubenswrapper[4734]: E1125 09:28:30.247640 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:30 crc kubenswrapper[4734]: E1125 09:28:30.247795 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.261919 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e9116
99a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.277821 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPa
th\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.291604 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.305027 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.312580 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.312618 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.312629 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.312650 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.312660 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.317921 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.336559 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.354024 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de25971
26bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.368956 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.386040 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.404716 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.431955 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.431998 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.432008 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.432026 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.432037 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.454265 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.476384 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.502159 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.514463 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.534973 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.535022 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.535032 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.535046 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc 
kubenswrapper[4734]: I1125 09:28:30.535059 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.638630 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.638680 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.638697 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.638715 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.638729 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.745945 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.746004 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.746019 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.746041 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.746073 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.849389 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.849953 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.849967 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.849987 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.850000 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.953368 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.953971 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.954053 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.954215 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:30 crc kubenswrapper[4734]: I1125 09:28:30.954301 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:30Z","lastTransitionTime":"2025-11-25T09:28:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.056748 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.056782 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.056792 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.056809 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.056823 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.159387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.159448 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.159463 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.159489 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.159504 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.263524 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.263589 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.263603 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.263625 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.263638 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.367310 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.367377 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.367395 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.367422 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.367444 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.469887 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.469942 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.469952 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.469972 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.469984 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.550561 4734 generic.go:334] "Generic (PLEG): container finished" podID="4da62509-e117-444b-9f78-c5c9e52b1b87" containerID="ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7" exitCode=0 Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.550660 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerDied","Data":"ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.559809 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.560200 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.574608 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.575240 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.575282 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.575301 4734 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.575324 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.575338 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.591212 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.606478 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.622598 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.626885 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.643266 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.662271 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.678429 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.678472 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.678483 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.678504 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.678518 4734 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.681108 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.703803 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.720905 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.738186 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.757826 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.775222 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.782292 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.782334 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.782344 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.782364 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.782375 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.791946 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.806665 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.823383 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.843549 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.866253 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.880722 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.885040 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.885078 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.885102 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.885118 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.885132 4734 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.891878 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.916462 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.933809 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.951452 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.969683 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.987849 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.989263 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.989294 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.989303 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.989330 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:31 crc kubenswrapper[4734]: I1125 09:28:31.989340 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:31Z","lastTransitionTime":"2025-11-25T09:28:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.004816 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.024140 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.045367 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.061226 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.092814 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.092858 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.092870 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.092892 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc 
kubenswrapper[4734]: I1125 09:28:32.092904 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.197390 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.197487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.197511 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.197533 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.197544 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.246616 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.246616 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.246689 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:32 crc kubenswrapper[4734]: E1125 09:28:32.246782 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:32 crc kubenswrapper[4734]: E1125 09:28:32.246830 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:32 crc kubenswrapper[4734]: E1125 09:28:32.246944 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.301149 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.301533 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.301544 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.301562 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.301574 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.404656 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.404704 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.404713 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.404732 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.404742 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.507652 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.507716 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.507729 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.507748 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.507763 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.568112 4734 generic.go:334] "Generic (PLEG): container finished" podID="4da62509-e117-444b-9f78-c5c9e52b1b87" containerID="11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e" exitCode=0 Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.568210 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerDied","Data":"11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.568279 4734 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.569303 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.586906 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.599033 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.600473 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.613497 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.613543 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.613556 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.613574 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.613585 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.615305 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.628071 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.644275 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.659673 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.679548 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.702658 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7d
e72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.720800 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.722848 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.723203 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.723328 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.723488 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.723635 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.738055 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.755681 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.775135 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.793589 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.806457 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.822132 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.826268 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.826318 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.826332 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.826352 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.826367 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.837745 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.853417 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.869799 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.883207 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.896522 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.912784 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.928936 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.929115 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.929144 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.929156 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.929177 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.929189 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:32Z","lastTransitionTime":"2025-11-25T09:28:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.946899 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec5
34e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.969706 4734 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:17
4f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ov
n-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:32 crc kubenswrapper[4734]: I1125 09:28:32.988919 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.004987 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.022684 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.032433 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.032477 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.032487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.032512 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.032531 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.039880 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.135902 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.135954 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.135971 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.135989 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.136000 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.238504 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.238548 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.238558 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.238575 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.238585 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.341158 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.341208 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.341222 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.341240 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.341252 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.443120 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.443173 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.443190 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.443209 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.443221 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.545280 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.545327 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.545340 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.545359 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.545375 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.577811 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" event={"ID":"4da62509-e117-444b-9f78-c5c9e52b1b87","Type":"ContainerStarted","Data":"46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.577899 4734 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.592062 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.604621 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.622681 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.640508 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.649612 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.649680 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.649697 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.649723 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.649748 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.663855 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.679029 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.696550 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.711330 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.724263 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\
\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.738047 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"
},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.754037 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.754123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.754138 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.754164 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.754178 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.757683 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.776740 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.799309 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.814699 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.857750 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.857803 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.857814 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.857832 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.857844 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.960471 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.960511 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.960532 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.960553 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:33 crc kubenswrapper[4734]: I1125 09:28:33.960565 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:33Z","lastTransitionTime":"2025-11-25T09:28:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.063671 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.063724 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.063737 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.063754 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.063766 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.166833 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.166923 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.166940 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.166957 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.166969 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.246549 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:34 crc kubenswrapper[4734]: E1125 09:28:34.246723 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.246756 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:34 crc kubenswrapper[4734]: E1125 09:28:34.246854 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.246848 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:34 crc kubenswrapper[4734]: E1125 09:28:34.246920 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.269012 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.269066 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.269084 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.269119 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.269155 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.370933 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.371012 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.371038 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.371068 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.371138 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.474803 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.474863 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.474874 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.474893 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.474909 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.577450 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.577494 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.577506 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.577526 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.577537 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.579839 4734 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.680593 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.680640 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.680653 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.680670 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.680689 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.783675 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.783723 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.783736 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.783756 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.783770 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.887201 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.887278 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.887292 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.887313 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.887328 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.990064 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.990144 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.990154 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.990171 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:34 crc kubenswrapper[4734]: I1125 09:28:34.990183 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:34Z","lastTransitionTime":"2025-11-25T09:28:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.093041 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.093075 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.093086 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.093168 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.093177 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.198467 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.198547 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.198563 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.198582 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.198596 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.301387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.301450 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.301461 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.301482 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.301496 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.404411 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.404476 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.404486 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.404505 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.404516 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.507205 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.507252 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.507263 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.507283 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.507294 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.583881 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/0.log" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.586686 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848" exitCode=1 Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.586727 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.587441 4734 scope.go:117] "RemoveContainer" containerID="bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.607938 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.611064 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.611146 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.611159 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.611182 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.611194 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.621395 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.644542 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.662831 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.678514 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.692647 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.709842 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.713896 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.713948 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.713959 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.713976 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.713988 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.738545 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.762230 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.791629 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.810533 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.816052 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.816141 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.816157 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.816178 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.816190 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.829154 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.845768 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.860154 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.919705 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.919759 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.919770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.919790 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.919802 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:35Z","lastTransitionTime":"2025-11-25T09:28:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.954878 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg"] Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.955523 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.959137 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.960453 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.972021 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controlle
r-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:35 crc kubenswrapper[4734]: I1125 09:28:35.988086 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.007409 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.022259 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.022308 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.022325 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.022348 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.022366 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.022758 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.036618 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.049390 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.059158 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/83557809-7515-4cc3-afab-e940ed4b823f-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.059211 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/83557809-7515-4cc3-afab-e940ed4b823f-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.059376 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qks2b\" (UniqueName: \"kubernetes.io/projected/83557809-7515-4cc3-afab-e940ed4b823f-kube-api-access-qks2b\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.059523 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/83557809-7515-4cc3-afab-e940ed4b823f-env-overrides\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.072134 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.090440 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.102174 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.116342 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.124531 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.124591 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.124604 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.124621 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.124633 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.127151 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.138015 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.150131 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.159860 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.160130 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/83557809-7515-4cc3-afab-e940ed4b823f-env-overrides\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.160225 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/83557809-7515-4cc3-afab-e940ed4b823f-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.160262 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/83557809-7515-4cc3-afab-e940ed4b823f-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.160318 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qks2b\" (UniqueName: \"kubernetes.io/projected/83557809-7515-4cc3-afab-e940ed4b823f-kube-api-access-qks2b\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.160803 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/83557809-7515-4cc3-afab-e940ed4b823f-env-overrides\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.160921 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/83557809-7515-4cc3-afab-e940ed4b823f-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.165674 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/83557809-7515-4cc3-afab-e940ed4b823f-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.173065 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.178282 4734 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qks2b\" (UniqueName: \"kubernetes.io/projected/83557809-7515-4cc3-afab-e940ed4b823f-kube-api-access-qks2b\") pod \"ovnkube-control-plane-749d76644c-5xjzg\" (UID: \"83557809-7515-4cc3-afab-e940ed4b823f\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.228602 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.228650 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.228661 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.228679 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.228692 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.246982 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:36 crc kubenswrapper[4734]: E1125 09:28:36.247139 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.247402 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:36 crc kubenswrapper[4734]: E1125 09:28:36.247590 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.247651 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:36 crc kubenswrapper[4734]: E1125 09:28:36.247701 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.267864 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.331139 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.331193 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.331206 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.331227 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.331240 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.434325 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.434385 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.434395 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.434414 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.434429 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.537142 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.537222 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.537255 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.537274 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.537288 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.591708 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" event={"ID":"83557809-7515-4cc3-afab-e940ed4b823f","Type":"ContainerStarted","Data":"3e11c3b3e8c3d286a27ad9d1b236383eec04a40e79a48872d0b39f5caede0ede"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.640661 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.640721 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.640733 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.640751 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.640764 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.668161 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-tfr8m"] Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.669064 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:36 crc kubenswrapper[4734]: E1125 09:28:36.669161 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.684491 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.700233 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.715129 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.739640 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.743400 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.743474 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.743491 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.743516 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.743530 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.758373 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.765896 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.765957 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj7s9\" (UniqueName: \"kubernetes.io/projected/54363663-3559-4203-bf8f-03e3bf4d1127-kube-api-access-xj7s9\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.785090 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.795858 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.809057 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.825018 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.838399 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.845406 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.845445 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.845456 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.845474 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.845486 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.852325 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.867354 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.867395 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj7s9\" (UniqueName: \"kubernetes.io/projected/54363663-3559-4203-bf8f-03e3bf4d1127-kube-api-access-xj7s9\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:36 crc kubenswrapper[4734]: E1125 09:28:36.867737 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:36 crc kubenswrapper[4734]: E1125 09:28:36.867907 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:37.367874692 +0000 UTC m=+40.178336876 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.884588 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.887053 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj7s9\" (UniqueName: \"kubernetes.io/projected/54363663-3559-4203-bf8f-03e3bf4d1127-kube-api-access-xj7s9\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.897310 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.911154 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.938537 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7d
e72b2499c2946480f856b848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.948749 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.948802 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.948814 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.948834 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.948847 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:36Z","lastTransitionTime":"2025-11-25T09:28:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:36 crc kubenswrapper[4734]: I1125 09:28:36.952459 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:36Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.051027 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.051062 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.051070 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.051098 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.051108 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.154494 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.154551 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.154562 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.154580 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.154592 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.257645 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.257694 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.257705 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.257723 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.257736 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.360538 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.360600 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.360611 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.360626 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.360640 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.374351 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.374534 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.374600 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:38.374579252 +0000 UTC m=+41.185041246 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.462799 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.462844 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.462858 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.462873 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.462883 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.565935 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.565986 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.565998 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.566019 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.566031 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.595554 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" event={"ID":"83557809-7515-4cc3-afab-e940ed4b823f","Type":"ContainerStarted","Data":"137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.601209 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/0.log" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.603986 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.668306 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.668368 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.668385 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.668407 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.668423 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.772337 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.772378 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.772425 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.772453 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.772471 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.875321 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.875367 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.875380 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.875397 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.875409 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.978177 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.978268 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.978283 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.978312 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.978325 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:37Z","lastTransitionTime":"2025-11-25T09:28:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.980599 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.980750 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.980796 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:28:53.980776741 +0000 UTC m=+56.791238725 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:28:37 crc kubenswrapper[4734]: I1125 09:28:37.980821 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.980850 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.980892 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:53.980881645 +0000 UTC m=+56.791343639 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.980938 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:37 crc kubenswrapper[4734]: E1125 09:28:37.980966 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:53.980959157 +0000 UTC m=+56.791421151 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.080954 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.081021 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.081035 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.081056 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.081070 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.081433 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.081481 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081644 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081669 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081682 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081690 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081698 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081704 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081774 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:54.081750096 +0000 UTC m=+56.892212260 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.081805 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:54.081795908 +0000 UTC m=+56.892258132 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.185123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.185179 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.185191 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.185210 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.185224 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.246245 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.246282 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.246244 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.246390 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.246405 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.246448 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.246499 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.246547 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.287859 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.287901 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.287911 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.287929 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.287942 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.385387 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.385644 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: E1125 09:28:38.385768 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:40.385743573 +0000 UTC m=+43.196205747 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.390615 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.390680 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.390690 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.390706 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.390718 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.494035 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.494075 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.494090 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.494128 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.494143 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.596554 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.596593 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.596605 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.596623 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.596635 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.608950 4734 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.609971 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" event={"ID":"83557809-7515-4cc3-afab-e940ed4b823f","Type":"ContainerStarted","Data":"bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.624875 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/opensh
ift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.646315 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.666538 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.684455 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.699159 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.699199 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.699211 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.699228 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.699242 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.703200 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.717302 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.738619 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.753372 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.769023 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.785049 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.800835 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.802888 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.802944 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.802964 4734 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.802991 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.803002 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.817006 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.831331 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.845627 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.857994 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.870112 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.905193 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.905246 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.905259 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.905276 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:38 crc kubenswrapper[4734]: I1125 09:28:38.905289 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:38Z","lastTransitionTime":"2025-11-25T09:28:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.007628 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.007672 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.007688 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.007704 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.007715 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.110998 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.111057 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.111066 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.111090 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.111123 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.121730 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.121785 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.121797 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.121819 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.121831 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: E1125 09:28:39.134855 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 
2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.139199 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.139246 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.139258 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.139273 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.139284 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: E1125 09:28:39.151455 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 
2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.156311 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.156350 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.156359 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.156375 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.156386 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: E1125 09:28:39.169636 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 
2025-08-24T17:21:41Z"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.176268 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.176338 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.176354 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.176376 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.176390 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:39 crc kubenswrapper[4734]: E1125 09:28:39.188749 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... status payload identical to the previous attempt ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.193944 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.194018 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.194031 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.194053 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.194066 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
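Every retry in this burst dies at the same admission webhook: the kubelet's node PATCH is rejected because the webhook's serving certificate expired on 2025-08-24, while the node clock reads 2025-11-25. A minimal standard-library Go sketch of the same validity check (the address is the webhook endpoint named in the log; chain verification is skipped deliberately because only the certificate dates are of interest):

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Webhook endpoint from the log entries above.
	addr := "127.0.0.1:9743"
	conn, err := tls.Dial("tcp", addr, &tls.Config{
		InsecureSkipVerify: true, // we only want to read NotBefore/NotAfter
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		valid := now.After(cert.NotBefore) && now.Before(cert.NotAfter)
		fmt.Printf("subject=%q notBefore=%s notAfter=%s validNow=%v\n",
			cert.Subject.CommonName,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339), valid)
	}
}

Run against this endpoint at the logged time, validNow would print false, which is precisely the x509 error the kubelet keeps reporting.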
Nov 25 09:28:39 crc kubenswrapper[4734]: E1125 09:28:39.208595 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... status payload identical to the previous attempts ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:39 crc kubenswrapper[4734]: E1125 09:28:39.208854 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.213810 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.214194 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.214283 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.214375 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.214470 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.317759 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.317799 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.317809 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.317825 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
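The "Unable to update node status" entry above is the kubelet giving up, not a new failure mode: the status updater runs a short bounded loop, and every attempt hit the same expired-certificate webhook. A sketch of that control flow; the limit of five mirrors kubelet's nodeStatusUpdateRetry constant and is an assumption here, and updateNodeStatus is a hypothetical stand-in for the real PATCH call:

package main

import (
	"errors"
	"fmt"
)

// Assumed to mirror kubelet's nodeStatusUpdateRetry constant.
const nodeStatusUpdateRetry = 5

// Stand-in for the per-attempt status PATCH, which in this log always
// fails at the node.network-node-identity.openshift.io webhook.
func updateNodeStatus() error {
	return errors.New("x509: certificate has expired or is not yet valid")
}

func main() {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := updateNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return // success: the API server accepted the status
	}
	fmt.Println("Unable to update node status: update node status exceeds retry count")
}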
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.317836 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.420857 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.420902 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.420916 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.420992 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.421053 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.525206 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.525504 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.525521 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.525540 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.525553 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.628729 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.628783 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.628796 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.628812 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
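Each of these NodeNotReady flaps traces back to one readiness rule: the runtime network stays NetworkReady=false until a CNI config file appears in /etc/kubernetes/cni/net.d/, which the network plugin writes once it is up (and its pods are themselves stalled by the webhook failures seen here). A small standalone Go check along those lines; the suffix list is the conventional CNI one and an assumption:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory the kubelet names in the log
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		// Conventional CNI config suffixes; adjust if your runtime differs.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", filepath.Join(dir, e.Name()))
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file in", dir, "- NetworkReady stays false")
	}
}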
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.628824 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.635752 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z"
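The failing pod updates use the same patch mechanics as the node updates above: a strategic merge patch whose "$setElementOrder/conditions" directive pins the order of the conditions list while "conditions" itself carries only the changed entries. A minimal sketch of that payload shape, using plain maps rather than client-go types, with values abbreviated from the network-operator patch above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Shape of the kubelet's pod status patch, per the log entry above.
	patch := map[string]any{
		"status": map[string]any{
			// Pins the full ordering of the conditions list.
			"$setElementOrder/conditions": []map[string]string{
				{"type": "PodReadyToStartContainers"},
				{"type": "Initialized"},
				{"type": "Ready"},
				{"type": "ContainersReady"},
				{"type": "PodScheduled"},
			},
			// Only changed list elements travel in the patch itself.
			"conditions": []map[string]string{
				{"lastTransitionTime": "2025-11-25T09:28:23Z", "status": "True", "type": "Ready"},
			},
		},
	}
	out, _ := json.Marshal(patch)
	fmt.Println(string(out)) // what the admission webhook would have seen
}

Because admission rejects every such request outright, none of these patches land and the API server's view of the pods on this node goes stale; the failure is in the webhook's certificate, not in the pods themselves.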
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.651965 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.666992 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.681291 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z"
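The kube-apiserver-check-endpoints container above sits in CrashLoopBackOff with "back-off 10s": each failed run roughly doubles the wait before the next restart attempt. A sketch of that schedule; the 10s base and 5m ceiling are the usual kubelet defaults, assumed here rather than read from this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed kubelet defaults: 10s initial back-off, doubling per crash,
	// capped at 5 minutes.
	backoff := 10 * time.Second
	const maxBackoff = 5 * time.Minute
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: wait %s before retrying the container\n", restart, backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}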
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.694321 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.709140 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.723241 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.731746 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.731802 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.731814 4734 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.731836 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.731849 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.736626 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.749481 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.762582 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.779918 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.793012 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-
25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.809161 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.824787 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.834927 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.834979 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.834994 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.835018 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.835035 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.841182 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.868500 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.937515 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.937560 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.937571 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.937588 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:39 crc kubenswrapper[4734]: I1125 09:28:39.937600 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:39Z","lastTransitionTime":"2025-11-25T09:28:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.040331 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.040393 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.040413 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.040439 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.040456 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.142736 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.142794 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.142804 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.142824 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.142834 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.245844 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.245881 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.245891 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.245905 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.245923 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.246010 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.246172 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.246220 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.246233 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.246298 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.246303 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.246414 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.246591 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.261455 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.278673 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.290888 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-confi
g\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.301490 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.311624 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.324817 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.335391 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.349350 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.349811 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.349898 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.349917 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.349927 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.361445 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 
2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.378346 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.397184 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb9
8a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.406722 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.407043 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.407205 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:44.407181318 +0000 UTC m=+47.217643312 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.415197 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"
,\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.428167 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.443474 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.451929 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.451973 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.451985 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.452002 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.452013 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.458058 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.468045 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.475783 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.554245 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.554285 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.554294 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.554310 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.554326 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.618045 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/1.log" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.618798 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/0.log" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.621263 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d" exitCode=1 Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.621304 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.621344 4734 scope.go:117] "RemoveContainer" containerID="bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.622049 4734 scope.go:117] "RemoveContainer" containerID="7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d" Nov 25 09:28:40 crc kubenswrapper[4734]: E1125 09:28:40.622213 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.641126 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.652013 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.656944 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.656981 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.656989 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.657008 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.657019 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.663744 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.675809 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.688114 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\
\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.701355 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.715692 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.728527 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.738427 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.756597 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bfc42f20660ab24ccab60c98f5853ee99c8bee7de72b2499c2946480f856b848\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:34Z\\\",\\\"message\\\":\\\" removal\\\\nI1125 09:28:34.671394 6041 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 09:28:34.671416 6041 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:28:34.671429 6041 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:28:34.671438 6041 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 09:28:34.671444 6041 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 09:28:34.671447 6041 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:28:34.671455 6041 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:28:34.671458 6041 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 09:28:34.671461 6041 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:28:34.671468 6041 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:28:34.671478 6041 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1125 09:28:34.671491 6041 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:28:34.671492 6041 factory.go:656] Stopping watch factory\\\\nI1125 09:28:34.671500 6041 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:28:34.671511 6041 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:34.671519 6041 handler.go:208] Removed *v1.Node event handler 2\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] 
[default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kub
ernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.759453 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.759493 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.759505 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.759524 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.759538 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.769989 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.786169 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.801763 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.816088 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.829202 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.841625 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.861960 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.862005 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.862017 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.862031 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.862042 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.964572 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.964636 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.964652 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.964675 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:40 crc kubenswrapper[4734]: I1125 09:28:40.964691 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:40Z","lastTransitionTime":"2025-11-25T09:28:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.067835 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.067894 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.067906 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.067927 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.067941 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.170141 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.170179 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.170192 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.170209 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.170220 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.272713 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.273047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.273195 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.273226 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.273239 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.375973 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.376040 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.376053 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.376074 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.376111 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.478288 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.478354 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.478372 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.478398 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.478414 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.580625 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.580663 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.580674 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.580692 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.580706 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.627663 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/1.log" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.676367 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.677538 4734 scope.go:117] "RemoveContainer" containerID="7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d" Nov 25 09:28:41 crc kubenswrapper[4734]: E1125 09:28:41.677743 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.683378 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.683428 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.683442 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.683460 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.683473 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.692064 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.703982 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc 
kubenswrapper[4734]: I1125 09:28:41.715962 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.728809 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.738663 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.750277 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.764691 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.778512 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.786016 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.786077 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.786109 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.786128 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.786143 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.795205 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.813728 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.828258 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.846030 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.861301 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.876068 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.889123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.889171 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.889182 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.889196 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.889208 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.890548 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.901125 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.991627 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.991678 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.991690 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.991709 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:41 crc kubenswrapper[4734]: I1125 09:28:41.991721 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:41Z","lastTransitionTime":"2025-11-25T09:28:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.094413 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.094467 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.094479 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.094500 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.094514 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.197835 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.197889 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.197898 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.197914 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.197923 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.246457 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.246569 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.246525 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.246472 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:42 crc kubenswrapper[4734]: E1125 09:28:42.246681 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.247297 4734 scope.go:117] "RemoveContainer" containerID="01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999" Nov 25 09:28:42 crc kubenswrapper[4734]: E1125 09:28:42.247764 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:42 crc kubenswrapper[4734]: E1125 09:28:42.247867 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:42 crc kubenswrapper[4734]: E1125 09:28:42.247930 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.300320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.300387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.300400 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.300417 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.300431 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.402739 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.402810 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.402835 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.402859 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.402876 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.506320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.506362 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.506375 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.506393 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.506407 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.608667 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.608717 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.608728 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.608744 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.608757 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.710870 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.710914 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.710923 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.710938 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.710949 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.815015 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.815064 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.815076 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.815112 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.815127 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.922285 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.922342 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.922358 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.922378 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:42 crc kubenswrapper[4734]: I1125 09:28:42.922391 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:42Z","lastTransitionTime":"2025-11-25T09:28:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.025451 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.025492 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.025502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.025520 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.025533 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.128047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.128098 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.128107 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.128122 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.128132 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.230589 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.230638 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.230651 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.230668 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.230679 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.333452 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.333493 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.333505 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.333523 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.333535 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.435940 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.435985 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.435996 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.436012 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.436023 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.541262 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.541312 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.541320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.541335 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.541348 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.640251 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643241 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643277 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643288 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643305 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643317 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643446 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.643891 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.659069 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.672210 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.688426 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.706053 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.720769 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.733232 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.747582 4734 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.747629 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.747644 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.747670 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.747683 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.748111 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.760353 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.774307 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.789038 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.807955 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb9
8a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.829335 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6
693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.850689 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.850733 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.850744 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.850760 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.850774 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.851638 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.867290 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.882602 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.896929 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.954546 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.954617 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.954631 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.954653 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:43 crc kubenswrapper[4734]: I1125 09:28:43.954667 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:43Z","lastTransitionTime":"2025-11-25T09:28:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.058428 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.058482 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.058500 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.058524 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.058540 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.161326 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.161370 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.161379 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.161396 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.161414 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.246797 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.246850 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.246942 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:44 crc kubenswrapper[4734]: E1125 09:28:44.246955 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:44 crc kubenswrapper[4734]: E1125 09:28:44.247033 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:44 crc kubenswrapper[4734]: E1125 09:28:44.247118 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.247312 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:44 crc kubenswrapper[4734]: E1125 09:28:44.247468 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.263525 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.263751 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.263770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.263789 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.263803 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.366893 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.366938 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.366949 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.366966 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.366978 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.451894 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:44 crc kubenswrapper[4734]: E1125 09:28:44.452111 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:44 crc kubenswrapper[4734]: E1125 09:28:44.452182 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:28:52.452164033 +0000 UTC m=+55.262626037 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.470131 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.470184 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.470197 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.470213 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.470224 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.572954 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.572985 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.572993 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.573007 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.573016 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.676042 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.676168 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.676185 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.676211 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.676227 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.780296 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.780361 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.780380 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.780406 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.780420 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.884304 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.884352 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.884369 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.884389 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.884403 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.987839 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.987937 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.987949 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.987969 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:44 crc kubenswrapper[4734]: I1125 09:28:44.987983 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:44Z","lastTransitionTime":"2025-11-25T09:28:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.090681 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.090719 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.090727 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.090741 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.090750 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.193772 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.193837 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.193851 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.193870 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.193883 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.296906 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.296968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.296981 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.296999 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.297011 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.399994 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.400045 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.400061 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.400078 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.400114 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.502572 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.502638 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.502660 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.502689 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.502712 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.605467 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.605536 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.605558 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.605587 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.605611 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.708757 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.708819 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.708844 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.708867 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.708885 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.812380 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.812430 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.812443 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.812459 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.812469 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.915678 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.915742 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.915752 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.915767 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:45 crc kubenswrapper[4734]: I1125 09:28:45.915777 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:45Z","lastTransitionTime":"2025-11-25T09:28:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.018822 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.018869 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.018880 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.018898 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.018911 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.122004 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.122046 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.122054 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.122070 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.122237 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.225345 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.225425 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.225447 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.225489 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.225525 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.250579 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.250701 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.250588 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:46 crc kubenswrapper[4734]: E1125 09:28:46.259366 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.259433 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:46 crc kubenswrapper[4734]: E1125 09:28:46.259549 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:46 crc kubenswrapper[4734]: E1125 09:28:46.259617 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:46 crc kubenswrapper[4734]: E1125 09:28:46.259916 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.328409 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.328456 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.328464 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.328481 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.328492 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.431567 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.431642 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.431654 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.432292 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.432379 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.534951 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.535022 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.535035 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.535052 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.535062 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.637732 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.637807 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.637828 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.637856 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.637874 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.740734 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.740819 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.740833 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.740855 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.740874 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.843349 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.843407 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.843418 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.843437 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.843449 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.946880 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.946939 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.946951 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.946970 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:46 crc kubenswrapper[4734]: I1125 09:28:46.946983 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:46Z","lastTransitionTime":"2025-11-25T09:28:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.050108 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.050161 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.050171 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.050188 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.050200 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.152566 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.152598 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.152607 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.152622 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.152632 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.254819 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.254866 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.254875 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.254891 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.254903 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.357638 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.357686 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.357694 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.357712 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.357726 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.461035 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.461078 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.461109 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.461127 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.461140 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.563360 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.563425 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.563438 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.563455 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.563467 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.666675 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.667015 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.667121 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.667218 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.667307 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.770123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.770215 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.770227 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.770244 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.770276 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.873123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.873450 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.873548 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.873696 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.873790 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.976814 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.976854 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.976868 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.976887 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:47 crc kubenswrapper[4734]: I1125 09:28:47.976899 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:47Z","lastTransitionTime":"2025-11-25T09:28:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.080142 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.080186 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.080199 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.080216 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.080229 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.183057 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.183137 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.183147 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.183160 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.183171 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.246634 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.246733 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.246777 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:28:48 crc kubenswrapper[4734]: E1125 09:28:48.246813 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:28:48 crc kubenswrapper[4734]: E1125 09:28:48.246922 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:28:48 crc kubenswrapper[4734]: E1125 09:28:48.247126 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.247182 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:48 crc kubenswrapper[4734]: E1125 09:28:48.247268 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.286241 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.286330 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.286347 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.286653 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.286736 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.405283 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.405347 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.405357 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.405378 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.405389 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.507778 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.507833 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.507845 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.507865 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.507875 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.610891 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.610943 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.610955 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.610972 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.610987 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.713647 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.713694 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.713703 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.713718 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.713730 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.816004 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.816048 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.816058 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.816078 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.816101 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.919566 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.919613 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.919624 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.919642 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:48 crc kubenswrapper[4734]: I1125 09:28:48.919654 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:48Z","lastTransitionTime":"2025-11-25T09:28:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.022643 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.022701 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.022714 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.022731 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.022744 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.125224 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.125277 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.125289 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.125307 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.125320 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.229959 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.230029 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.230042 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.230068 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.230109 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.333074 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.333146 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.333158 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.333188 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.333203 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.436061 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.436135 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.436146 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.436162 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.436171 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.538943 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.538991 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.539005 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.539031 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.539047 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.567890 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.567951 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.567962 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.567981 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.567998 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:49 crc kubenswrapper[4734]: E1125 09:28:49.587184 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:49Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.592230 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.592275 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.592284 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.592299 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.592313 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: E1125 09:28:49.610955 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.618342 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.618386 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.618404 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.618422 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.618436 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: E1125 09:28:49.633735 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.637936 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.638006 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.638019 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.638038 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.638050 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: E1125 09:28:49.650566 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.654835 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.654868 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
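[Editor's note] Every status-patch attempt above fails at the same point: the admission webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z, months before the node clock's 2025-11-25. One way to confirm that from the node is to pull the serving certificate and compare its validity window against the current time. The following is a minimal Go sketch of that check, not OpenShift tooling; the address comes from the log, everything else is illustrative:

```go
// certcheck: fetch the TLS certificate a local endpoint serves and report
// its validity window. Diagnostic sketch only.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // webhook endpoint named in the kubelet log

	// Skip chain verification on purpose: we want to inspect the certificate
	// even though verification would fail, just as it did for the kubelet.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			now.After(cert.NotAfter))
	}
}
```

Run on the node while the webhook is still serving the expired certificate, this should print notAfter=2025-08-24T17:21:41Z expired=true, matching the x509 error in the records above.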
event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.654878 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.654893 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.654923 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: E1125 09:28:49.668229 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:49 crc kubenswrapper[4734]: E1125 09:28:49.668422 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.670052 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
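[Editor's note] The Ready=False condition the kubelet keeps recording below has a single cause repeated in every message: no CNI configuration file exists in /etc/kubernetes/cni/net.d/, so the container runtime reports NetworkReady=false and every pod that needs a sandbox network fails to sync. A minimal sketch of the readiness test implied by that message follows; the path is taken from the log, the file extensions are the ones libcni conventionally accepts, and the program itself is illustrative, not the kubelet's actual code:

```go
// cnicheck: report whether a CNI conf directory contains any network
// configuration files. Diagnostic sketch only.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log

	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read conf dir:", err)
		return
	}

	var found []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni conventionally accepts
			found = append(found, e.Name())
		}
	}
	if len(found) == 0 {
		fmt.Println("NetworkReady=false: no CNI configuration file in", confDir)
		return
	}
	fmt.Println("CNI configs present:", found)
}
```

Once the network provider (here, OVN-Kubernetes via ovnkube-node) writes its config into that directory, the runtime flips NetworkReady to true and the records below stop.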
event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.670122 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.670136 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.670155 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.670169 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.773209 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.773314 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.773326 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.773344 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.773355 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.876258 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.876364 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.876384 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.876413 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.876430 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.980464 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.980513 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.980526 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.980547 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:49 crc kubenswrapper[4734]: I1125 09:28:49.980562 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:49Z","lastTransitionTime":"2025-11-25T09:28:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.083836 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.083876 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.083886 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.083903 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.083914 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.188437 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.188496 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.188511 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.188532 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.188545 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.246057 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.246067 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.246136 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:50 crc kubenswrapper[4734]: E1125 09:28:50.246853 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.247221 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:50 crc kubenswrapper[4734]: E1125 09:28:50.247302 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:50 crc kubenswrapper[4734]: E1125 09:28:50.247378 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:50 crc kubenswrapper[4734]: E1125 09:28:50.247446 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.268288 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"container
ID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\"
:[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 
09:28:50.289287 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\
\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325745
3265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.290945 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.290997 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.291011 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.291041 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.291053 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.306307 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.321751 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.336391 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.355556 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.374716 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.388853 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.393176 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.393242 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.393257 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.393275 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.393288 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.400861 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.413278 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.425443 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.435155 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.449762 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.465385 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.476738 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.494226 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:50Z is after 
2025-08-24T17:21:41Z" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.495846 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.495894 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.495906 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.495927 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.495940 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.598743 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.598790 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.598800 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.598818 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.598829 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.701335 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.701422 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.701436 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.701458 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.701470 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.804824 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.804881 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.804932 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.804957 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.804973 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.909338 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.909375 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.909384 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.909400 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:50 crc kubenswrapper[4734]: I1125 09:28:50.909410 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:50Z","lastTransitionTime":"2025-11-25T09:28:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.013066 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.013187 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.013213 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.013242 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.013262 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.115898 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.116047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.116078 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.116167 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.116201 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.219830 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.219911 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.219931 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.219962 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.219986 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.322900 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.322940 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.322949 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.322964 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.322974 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.426966 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.427039 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.427058 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.427112 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.427136 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.530174 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.530226 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.530240 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.530261 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.530278 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.633929 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.634000 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.634019 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.634046 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.634067 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.737043 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.737111 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.737122 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.737141 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.737152 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.840424 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.840511 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.840522 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.840549 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.840561 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.943956 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.943998 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.944010 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.944029 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:51 crc kubenswrapper[4734]: I1125 09:28:51.944044 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:51Z","lastTransitionTime":"2025-11-25T09:28:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.047441 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.047479 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.047487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.047505 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.047518 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.151020 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.151161 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.151181 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.151251 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.151272 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.246968 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.247024 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.246968 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.246969 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:52 crc kubenswrapper[4734]: E1125 09:28:52.247252 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:52 crc kubenswrapper[4734]: E1125 09:28:52.247432 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:52 crc kubenswrapper[4734]: E1125 09:28:52.247604 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:52 crc kubenswrapper[4734]: E1125 09:28:52.247737 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.254707 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.254988 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.255163 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.255292 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.255406 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.358665 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.358715 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.358728 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.358802 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.358814 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.462585 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.462638 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.462656 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.462684 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.462703 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.544763 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:52 crc kubenswrapper[4734]: E1125 09:28:52.544978 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:52 crc kubenswrapper[4734]: E1125 09:28:52.545069 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:29:08.545044258 +0000 UTC m=+71.355506282 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.566247 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.566298 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.566310 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.566330 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.566344 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.668787 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.668840 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.668849 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.668869 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.668880 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.742493 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.759029 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.763130 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.775348 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.775416 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.775430 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.775453 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.775465 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.777679 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.789459 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.803540 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.819931 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-ku
bernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.834320 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-
25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.848118 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.863290 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.877916 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.877971 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.877986 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.878003 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.878016 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.881268 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.904984 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.921066 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.933838 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.946330 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.960591 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.975749 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.980375 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.980445 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.980463 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.980489 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.980507 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:52Z","lastTransitionTime":"2025-11-25T09:28:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:52 crc kubenswrapper[4734]: I1125 09:28:52.987569 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:53 crc 
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.083293 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.083366 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.083389 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.083420 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.083446 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.186808 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.186848 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.186859 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.186876 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.186888 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.289491 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.289736 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.289750 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.289883 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.289902 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.392230 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.392289 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.392308 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.392332 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.392351 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.495483 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.495515 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.495524 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.495540 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.495551 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.602890 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.602966 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.602989 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.603026 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.603051 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.705853 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.705911 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.705921 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.705940 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.705955 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.809194 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.809275 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.809298 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.809332 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.809354 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.912282 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.912339 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.912353 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.912373 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:53 crc kubenswrapper[4734]: I1125 09:28:53.912385 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:53Z","lastTransitionTime":"2025-11-25T09:28:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.014685 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.014742 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.014753 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.014768 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.014778 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.059667 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.059876 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.059913 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:29:26.059873228 +0000 UTC m=+88.870335242 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.059991 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.060117 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.060462 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:29:26.060250919 +0000 UTC m=+88.870713113 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.060614 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.060761 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:29:26.060728213 +0000 UTC m=+88.871190237 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.118772 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.118827 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.118844 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.118869 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.118890 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.161744 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.161806 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.161982 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162005 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162015 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162030 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162033 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162044 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162125 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:29:26.16210916 +0000 UTC m=+88.972571154 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.162144 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:29:26.162138261 +0000 UTC m=+88.972600255 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.231208 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.231255 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.231267 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.231284 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.231294 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.246579 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.246557 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.246637 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.246638 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.246687 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.246792 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.246915 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:28:54 crc kubenswrapper[4734]: E1125 09:28:54.247028 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.333832 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.333897 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.333910 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.333949 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.333985 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.437650 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.437754 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.437770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.437795 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.437807 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.541150 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.541207 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.541229 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.541255 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.541274 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.645016 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.645618 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.645774 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.645918 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.645940 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.749172 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.749234 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.749245 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.749264 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.749275 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.852495 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.852559 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.852571 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.852592 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.852605 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.955455 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.955502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.955511 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.955529 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:54 crc kubenswrapper[4734]: I1125 09:28:54.955539 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:54Z","lastTransitionTime":"2025-11-25T09:28:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.058339 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.058431 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.058464 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.058500 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.058524 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.161255 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.161320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.161331 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.161350 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.161363 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.264118 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.264171 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.264183 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.264201 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.264213 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.367156 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.367251 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.367265 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.367281 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.367294 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.470190 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.470241 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.470252 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.470308 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.470324 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.572414 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.572721 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.572796 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.572864 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.572925 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.675509 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.675930 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.676157 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.676377 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.676511 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.778830 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.778865 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.778875 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.778891 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.778903 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.881250 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.881581 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.881675 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.881762 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.881828 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.984691 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.984750 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.984766 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.984783 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:55 crc kubenswrapper[4734]: I1125 09:28:55.984795 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:55Z","lastTransitionTime":"2025-11-25T09:28:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.086714 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.086751 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.086759 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.086774 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.086783 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.189908 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.189981 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.189999 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.190024 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.190043 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.246282 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.246231 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.246302 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:28:56 crc kubenswrapper[4734]: E1125 09:28:56.246424 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:28:56 crc kubenswrapper[4734]: E1125 09:28:56.246513 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.246541 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:28:56 crc kubenswrapper[4734]: E1125 09:28:56.246571 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:28:56 crc kubenswrapper[4734]: E1125 09:28:56.246979 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.247192 4734 scope.go:117] "RemoveContainer" containerID="7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.295047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.295512 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.295533 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.295554 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.295567 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.398796 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.398841 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.398851 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.398875 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.398888 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.501789 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.501837 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.501848 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.501871 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.501891 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.604416 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.604461 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.604471 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.604487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.604497 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.706822 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.706860 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.706868 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.706901 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.706914 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.738165 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/1.log"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.741223 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682"}
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.741816 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.764572 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.793906 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z"
Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.809917 4734 kubelet_node_status.go:724]
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.809974 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.809986 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.810017 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.810031 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.812342 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.825469 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servic
eaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.838920 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.855904 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.868881 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.885348 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.905111 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.912351 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.912386 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.912397 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.912414 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.912425 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:56Z","lastTransitionTime":"2025-11-25T09:28:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.934262 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler 
{\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.949694 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.963445 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.979127 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:56 crc kubenswrapper[4734]: I1125 09:28:56.991754 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:56Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.004294 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.014944 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.014988 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.014997 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.015012 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.015022 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.017858 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.029780 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.118047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.118111 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.118123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.118143 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.118157 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.221040 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.221105 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.221121 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.221141 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.221154 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.324229 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.324283 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.324292 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.324313 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.324324 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.427007 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.427044 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.427053 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.427068 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.427100 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.530397 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.530471 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.530491 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.530518 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.530553 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.632920 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.632966 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.632979 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.633004 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.633018 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.735916 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.735968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.735999 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.736015 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.736024 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.746302 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/2.log" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.747146 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/1.log" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.749817 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682" exitCode=1 Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.749912 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.749991 4734 scope.go:117] "RemoveContainer" containerID="7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.750926 4734 scope.go:117] "RemoveContainer" containerID="646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682" Nov 25 09:28:57 crc kubenswrapper[4734]: E1125 09:28:57.751205 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.775388 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.789499 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.800856 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.811155 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.820502 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.833966 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.838419 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.838495 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.838517 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.838547 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.838571 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.847870 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.864650 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592
a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.876671 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.889307 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.911337 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.929154 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.941559 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.941625 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.941646 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.941693 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.941711 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:57Z","lastTransitionTime":"2025-11-25T09:28:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.947609 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"sy
stem-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.968266 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 
09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.980500 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:57 crc kubenswrapper[4734]: I1125 09:28:57.999672 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:57Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.016573 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7efda93c64d9ed49a9b97acb5d10a18ad9d521f6693d5c0463d2555446e0f17d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"message\\\":\\\"ver-crc in node crc\\\\nI1125 09:28:39.794496 6216 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1125 09:28:39.794505 6216 default_network_controller.go:776] Recording success event on pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 09:28:39.794528 6216 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794443 6216 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:28:39.794548 6216 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-tfr8m\\\\nI1125 09:28:39.794559 6216 ovn.go:134] Ensuring zone local for Pod openshift-multus/network-metrics-daemon-tfr8m in node crc\\\\nI1125 09:28:39.794591 6216 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1125 09:28:39.794608 6216 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-tfr8m] creating logical port openshift-multus_network-metrics-daemon-tfr8m for pod on switch crc\\\\nF1125 09:28:39.794668 6216 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773
257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.044342 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.044380 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.044389 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.044404 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.044415 4734 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.147173 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.147221 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.147237 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.147256 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.147269 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.246704 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.246803 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.246938 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:28:58 crc kubenswrapper[4734]: E1125 09:28:58.246941 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.246735 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:28:58 crc kubenswrapper[4734]: E1125 09:28:58.247056 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:28:58 crc kubenswrapper[4734]: E1125 09:28:58.247318 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:28:58 crc kubenswrapper[4734]: E1125 09:28:58.247381 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.250736 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.250803 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.250827 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.250861 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.250885 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.353691 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.353740 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.353751 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.353770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.353784 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.458198 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.458254 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.458266 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.458282 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.458295 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.561034 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.561163 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.561187 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.561218 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.561241 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.664294 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.664353 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.664368 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.664386 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.664400 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.760579 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/2.log" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.763993 4734 scope.go:117] "RemoveContainer" containerID="646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682" Nov 25 09:28:58 crc kubenswrapper[4734]: E1125 09:28:58.764165 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.767149 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.767190 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.767205 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.767221 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.767235 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.782759 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.798231 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.814805 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.828408 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.841024 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.854497 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.867565 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.869056 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.869110 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.869122 4734 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.869140 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.869154 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.882292 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.892523 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.904222 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.918254 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.935242 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.954065 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.969570 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.971807 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.971851 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.971861 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.971881 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.971896 4734 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:58Z","lastTransitionTime":"2025-11-25T09:28:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:58 crc kubenswrapper[4734]: I1125 09:28:58.991191 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.007008 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.022180 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.074551 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.074637 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.074661 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.074696 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.074717 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.095362 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.120340 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.131778 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.145785 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.164049 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.175884 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.177959 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.177992 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.178007 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.178025 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.178038 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.188374 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.202616 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.215236 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.226855 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.237661 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.255424 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe
13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.268037 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.280814 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.280886 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.280898 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.280917 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.280928 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.286889 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.301048 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.315256 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.328651 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.342445 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.384409 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.384484 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.384502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.384530 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.384548 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.487509 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.487560 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.487578 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.487597 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.487609 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.590705 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.590747 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.590757 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.590774 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.590786 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.693765 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.693816 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.693826 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.693844 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.693862 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.738709 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.738753 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.738762 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.738775 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.738786 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: E1125 09:28:59.751583 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.755503 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.755541 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.755552 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.755571 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.755585 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: E1125 09:28:59.768287 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.771731 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.771777 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.771787 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.771804 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.771815 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: E1125 09:28:59.783069 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.787681 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.787791 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.787806 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.787832 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.787846 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: E1125 09:28:59.799712 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.803040 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.803106 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.803118 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.803138 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.803149 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: E1125 09:28:59.815211 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:28:59Z is after 2025-08-24T17:21:41Z" Nov 25 09:28:59 crc kubenswrapper[4734]: E1125 09:28:59.815350 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.817264 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.817320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.817332 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.817351 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.817365 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.920448 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.920495 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.920504 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.920522 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:28:59 crc kubenswrapper[4734]: I1125 09:28:59.920532 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:28:59Z","lastTransitionTime":"2025-11-25T09:28:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.024121 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.024197 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.024214 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.024236 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.024253 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.128099 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.128157 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.128174 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.128194 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.128209 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.230792 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.230867 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.230883 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.230904 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.230922 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.246702 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.246831 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:00 crc kubenswrapper[4734]: E1125 09:29:00.246888 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:00 crc kubenswrapper[4734]: E1125 09:29:00.246960 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.246720 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:00 crc kubenswrapper[4734]: E1125 09:29:00.247191 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.247266 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:00 crc kubenswrapper[4734]: E1125 09:29:00.247506 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.261074 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.277303 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.296849 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.311371 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.324128 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.334385 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.334449 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.334465 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.334489 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.334503 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.338895 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e4
40c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.358571 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.373655 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.388701 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.407503 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.419712 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.431775 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.436566 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.436622 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.436635 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.436653 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.436666 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.446070 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.459497 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.472509 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.485901 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.497482 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:00Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.539961 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.540028 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.540042 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.540060 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.540074 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.642579 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.642979 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.642992 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.643010 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.643020 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.745510 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.745821 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.745892 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.745982 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.746103 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.848579 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.848630 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.848641 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.848660 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.848674 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.951500 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.951536 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.951547 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.951560 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:00 crc kubenswrapper[4734]: I1125 09:29:00.951570 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:00Z","lastTransitionTime":"2025-11-25T09:29:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.054531 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.054575 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.054585 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.054603 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.054615 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.158285 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.158397 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.158417 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.158446 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.158463 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.261711 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.261778 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.261793 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.261813 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.261824 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.365385 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.365421 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.365432 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.365452 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.365461 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.468628 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.468738 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.468756 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.468775 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.468789 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.571862 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.571903 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.571914 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.571931 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.571945 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.674517 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.674575 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.674774 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.674799 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.674817 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.777352 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.777404 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.777414 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.777432 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.777446 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.880852 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.880908 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.880919 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.880941 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.881001 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.984114 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.984170 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.984187 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.984210 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:01 crc kubenswrapper[4734]: I1125 09:29:01.984228 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:01Z","lastTransitionTime":"2025-11-25T09:29:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.087168 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.087522 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.087609 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.087701 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.087783 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:02Z","lastTransitionTime":"2025-11-25T09:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.190139 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.190168 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.190176 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.190190 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.190199 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:02Z","lastTransitionTime":"2025-11-25T09:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.246582 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.246699 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.246630 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.246585 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:02 crc kubenswrapper[4734]: E1125 09:29:02.246927 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:02 crc kubenswrapper[4734]: E1125 09:29:02.246815 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:02 crc kubenswrapper[4734]: E1125 09:29:02.247151 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:02 crc kubenswrapper[4734]: E1125 09:29:02.247283 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.292736 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.292777 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.292790 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.292808 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.292819 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:02Z","lastTransitionTime":"2025-11-25T09:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.396168 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.396218 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.396230 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.396247 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:02 crc kubenswrapper[4734]: I1125 09:29:02.396260 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:02Z","lastTransitionTime":"2025-11-25T09:29:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.161932 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.161968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.161978 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.161991 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.162000 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:04Z","lastTransitionTime":"2025-11-25T09:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.246470 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.246523 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.246577 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.246606 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:04 crc kubenswrapper[4734]: E1125 09:29:04.246677 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:04 crc kubenswrapper[4734]: E1125 09:29:04.246973 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:04 crc kubenswrapper[4734]: E1125 09:29:04.247182 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:04 crc kubenswrapper[4734]: E1125 09:29:04.247326 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.264830 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.264893 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.264911 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.264931 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.264941 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:04Z","lastTransitionTime":"2025-11-25T09:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.367315 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.367363 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.367371 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.367385 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.367396 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:04Z","lastTransitionTime":"2025-11-25T09:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.470767 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.470828 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.470839 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.470858 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:04 crc kubenswrapper[4734]: I1125 09:29:04.470870 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:04Z","lastTransitionTime":"2025-11-25T09:29:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.228924 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.228972 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.228986 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.229006 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.229020 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:06Z","lastTransitionTime":"2025-11-25T09:29:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.246677 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:06 crc kubenswrapper[4734]: E1125 09:29:06.246847 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.246695 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:06 crc kubenswrapper[4734]: E1125 09:29:06.246946 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.246695 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:06 crc kubenswrapper[4734]: I1125 09:29:06.246974 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:06 crc kubenswrapper[4734]: E1125 09:29:06.247281 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:06 crc kubenswrapper[4734]: E1125 09:29:06.247503 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:08 crc kubenswrapper[4734]: I1125 09:29:08.246583 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:08 crc kubenswrapper[4734]: I1125 09:29:08.246649 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:08 crc kubenswrapper[4734]: I1125 09:29:08.246742 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:08 crc kubenswrapper[4734]: E1125 09:29:08.246870 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:08 crc kubenswrapper[4734]: I1125 09:29:08.246885 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:08 crc kubenswrapper[4734]: E1125 09:29:08.247029 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:08 crc kubenswrapper[4734]: E1125 09:29:08.247067 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:08 crc kubenswrapper[4734]: E1125 09:29:08.247152 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:08 crc kubenswrapper[4734]: I1125 09:29:08.583453 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:08 crc kubenswrapper[4734]: E1125 09:29:08.583650 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:29:08 crc kubenswrapper[4734]: E1125 09:29:08.583749 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:29:40.583722962 +0000 UTC m=+103.394184956 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered
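The retry stamped 09:29:40 (durationBeforeRetry 32s) is consistent with exponential backoff that starts at 500 ms and doubles on each failed mount (0.5 s, 1 s, 2 s, ..., 32 s after seven failures); those constants are an assumption about this kubelet build. The same policy, sketched with apimachinery's wait package:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Assumed constants: 500ms initial delay, doubling per failure, 8 attempts.
	// Note this really sleeps (about a minute total) if every attempt fails.
	backoff := wait.Backoff{Duration: 500 * time.Millisecond, Factor: 2, Steps: 8}
	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempt++
		fmt.Printf("mount attempt %d failed, backing off\n", attempt)
		return false, nil // false = not done yet, retry after the next delay
	})
	fmt.Println("gave up:", err) // wait.ErrWaitTimeout once Steps are exhausted
}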
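Just before the kubelet_node_status.go:585 record below, two full status-update cycles land about two milliseconds apart, and the kubelet's subsequent status patch fails and is retried. Purely as a demonstration of the patch shape (the kubelet owns node status, so do not run this against a live cluster), a client-go sketch issuing a comparable strategic-merge patch against the status subresource, trimmed to a single condition:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Conditions merge on "type" under strategic merge, as in the escaped
	// payload the kubelet is trying to apply in the record below.
	patch := []byte(`{"status":{"conditions":[{"type":"Ready","status":"False","reason":"KubeletNotReady","message":"container runtime network not ready"}]}}`)
	node, err := cs.CoreV1().Nodes().Patch(context.Background(), "crc",
		types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	if err != nil {
		panic(err)
	}
	fmt.Println("patched node:", node.Name)
}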
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.151269 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.151342 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.151358 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.151383 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.151408 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.153502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.153574 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.153597 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.153634 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.153657 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.167636 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.181320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.181360 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.181370 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.181387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.181398 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.202164 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.206965 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.207028 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.207041 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.207065 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.207078 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.224260 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.228760 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.228810 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.228822 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.228843 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.228856 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.242181 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246046 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246111 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246121 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246140 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246169 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246203 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246218 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246241 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.246256 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.246269 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.246335 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.246489 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.246549 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.257075 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.258465 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"1
9343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: E1125 09:29:10.258665 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.262303 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.262349 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.262362 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.262382 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.262398 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.266781 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.284781 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 
09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.298050 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.312343 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.325307 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.339529 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.352796 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.364472 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.364515 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.364526 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.364544 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.364555 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.369463 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.386056 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.400156 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.414758 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb9
8a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.439733 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c
85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.454867 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.467353 4734 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.467398 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.467412 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.467431 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.467448 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.473650 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.487187 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.498172 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:10Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.570145 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.570178 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.570201 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.570218 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.570230 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.673312 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.673355 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.673366 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.673387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.673405 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.775489 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.775552 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.775565 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.775581 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.775593 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.878861 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.878924 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.878936 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.878957 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.878969 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.981898 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.981934 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.981943 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.981959 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:10 crc kubenswrapper[4734]: I1125 09:29:10.981968 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:10Z","lastTransitionTime":"2025-11-25T09:29:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.084795 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.084866 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.084880 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.084905 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.084921 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.188356 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.188390 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.188400 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.188412 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.188422 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.291163 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.291207 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.291220 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.291239 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.291251 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.394180 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.394251 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.394265 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.394288 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.394303 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.496692 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.496747 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.496766 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.496791 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.496811 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.599113 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.599147 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.599156 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.599173 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.599183 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.701486 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.701555 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.701567 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.701586 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.701597 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.803729 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.803788 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.803802 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.803820 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.803834 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.825834 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/0.log" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.825885 4734 generic.go:334] "Generic (PLEG): container finished" podID="80259512-c4ac-4362-b21e-386796e31645" containerID="748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc" exitCode=1 Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.825912 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerDied","Data":"748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.826286 4734 scope.go:117] "RemoveContainer" containerID="748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.839184 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.852148 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.868500 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"2025-11-25T09:28:25+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4\\\\n2025-11-25T09:28:25+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4 to /host/opt/cni/bin/\\\\n2025-11-25T09:28:25Z [verbose] multus-daemon started\\\\n2025-11-25T09:28:25Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:29:10Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.881714 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 
09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.895995 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.907180 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.907232 4734 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.907244 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.907264 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.907277 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:11Z","lastTransitionTime":"2025-11-25T09:29:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.918946 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.935592 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.953355 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.970045 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.982563 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:11 crc kubenswrapper[4734]: I1125 09:29:11.994953 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60
764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:11Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.007784 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.010173 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.010212 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.010226 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.010247 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.010261 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.049752 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.091197 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.109196 4734 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.113591 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.113621 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.113630 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.113644 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.113655 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.125823 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.139917 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.217102 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.217153 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.217166 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.217184 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.217194 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.246457 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.246504 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.246483 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.246503 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:12 crc kubenswrapper[4734]: E1125 09:29:12.246669 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:12 crc kubenswrapper[4734]: E1125 09:29:12.246858 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:12 crc kubenswrapper[4734]: E1125 09:29:12.247037 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:12 crc kubenswrapper[4734]: E1125 09:29:12.247178 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.320700 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.320862 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.320949 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.321032 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.321117 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.424402 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.424444 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.424452 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.424487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.424499 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.527818 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.528286 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.528387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.528492 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.528568 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.631447 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.631493 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.631505 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.631521 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.631532 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.734725 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.734755 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.734765 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.734779 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.734790 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.831210 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/0.log" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.831288 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerStarted","Data":"babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.836720 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.836752 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.836763 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.836775 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.836785 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.845154 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.858466 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc 
kubenswrapper[4734]: I1125 09:29:12.874797 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.892434 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.905238 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.916536 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.929493 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.939130 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.939166 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.939177 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.939193 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.939204 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:12Z","lastTransitionTime":"2025-11-25T09:29:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.945494 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"2025-11-25T09:28:25+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4\\\\n2025-11-25T09:28:25+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4 to /host/opt/cni/bin/\\\\n2025-11-25T09:28:25Z [verbose] multus-daemon started\\\\n2025-11-25T09:28:25Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:29:10Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.960382 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.974389 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb9
8a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:12 crc kubenswrapper[4734]: I1125 09:29:12.995243 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c
85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.008668 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:13Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.023416 4734 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:13Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.036295 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:13Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.040981 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.041039 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.041053 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.041099 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.041119 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:13Z","lastTransitionTime":"2025-11-25T09:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.048243 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:13Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.060041 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:13Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.071405 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:13Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.143754 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.143795 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.143806 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.144247 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.144277 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:13Z","lastTransitionTime":"2025-11-25T09:29:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
Nov 25 09:29:13 crc kubenswrapper[4734]: I1125 09:29:13.247114 4734 scope.go:117] "RemoveContainer" containerID="646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682"
Nov 25 09:29:13 crc kubenswrapper[4734]: E1125 09:29:13.247300 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af"
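The "back-off 20s" in the CrashLoopBackOff entry is the second step of the kubelet's restart backoff, which is commonly documented as starting at 10s, doubling after each failed restart, and capping at five minutes. A small illustrative sketch of that schedule (the function name and parameters are ours, not a kubelet API):

```python
# Hedged sketch of the kubelet's crash-loop restart delays: exponential
# backoff from 10s, doubled per crash, capped at 300s. "back-off 20s"
# in the log corresponds to the second failed restart.
def crashloop_delays(n: int, base: int = 10, cap: int = 300):
    delay = base
    for _ in range(n):
        yield delay
        delay = min(delay * 2, cap)

print(list(crashloop_delays(6)))  # [10, 20, 40, 80, 160, 300]
```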
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.073308 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.073356 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.073366 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.073381 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.073392 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:14Z","lastTransitionTime":"2025-11-25T09:29:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.246338 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.246340 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.246361 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:14 crc kubenswrapper[4734]: I1125 09:29:14.246474 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:14 crc kubenswrapper[4734]: E1125 09:29:14.246603 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:14 crc kubenswrapper[4734]: E1125 09:29:14.246693 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:14 crc kubenswrapper[4734]: E1125 09:29:14.246780 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:14 crc kubenswrapper[4734]: E1125 09:29:14.246852 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
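The "No sandbox for pod can be found" / "Error syncing pod, skipping" pairs are the expected downstream effect of the NotReady condition: while the runtime reports NetworkReady=false, the kubelet keeps requeueing any pod that needs a pod network, and only host-network pods can make progress. A toy sketch of that gate (pod names are copied from the log; the Pod type, flags, and loop are illustrative, not kubelet code):

```python
# Hedged sketch of the sync gate behind "network is not ready": non-host-network
# pods are skipped and requeued until the CNI plugin reports ready.
from dataclasses import dataclass

@dataclass
class Pod:
    name: str
    host_network: bool

network_ready = False  # NetworkReady=false, per the runtime status above

pods = [
    Pod("openshift-multus/network-metrics-daemon-tfr8m", False),
    Pod("openshift-network-diagnostics/network-check-target-xd92c", False),
    Pod("openshift-ovn-kubernetes/ovnkube-node-2vvjj", True),  # host network
]

for p in pods:
    if not network_ready and not p.host_network:
        print(f"skip {p.name}: network is not ready")  # requeued for later
    else:
        print(f"sync {p.name}")
```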
Nov 25 09:29:15 crc kubenswrapper[4734]: I1125 09:29:15.000428 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:15 crc kubenswrapper[4734]: I1125 09:29:15.000883 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:15 crc kubenswrapper[4734]: I1125 09:29:15.000976 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:15 crc kubenswrapper[4734]: I1125 09:29:15.001107 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:15 crc kubenswrapper[4734]: I1125 09:29:15.001195 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:15Z","lastTransitionTime":"2025-11-25T09:29:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
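The setters.go entries are the kubelet writing the node's Ready condition into its status; the same condition can be read back from the API server to confirm what the log shows. A hedged sketch, assuming the kubernetes Python client is installed and a kubeconfig can reach this cluster (neither is part of the log):

```python
# Hedged sketch: read back the Ready condition the kubelet keeps reporting.
from kubernetes import client, config

config.load_kube_config()          # assumes a working kubeconfig
v1 = client.CoreV1Api()
node = v1.read_node("crc")         # node name taken from the log lines

for cond in node.status.conditions:
    if cond.type == "Ready":
        # Expected while CNI is down: False / KubeletNotReady / "container
        # runtime network not ready: NetworkReady=false ..."
        print(cond.status, cond.reason, cond.message)
```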
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.041688 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.041731 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.041741 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.041756 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.041767 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:16Z","lastTransitionTime":"2025-11-25T09:29:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
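The recurring "no CNI configuration file in /etc/kubernetes/cni/net.d/" message comes from the runtime's CNI loader, which looks for *.conf, *.conflist, or *.json files in that directory; here the file only appears once ovnkube-node runs cleanly, which is why the condition persists for as long as ovnkube-controller crash-loops. A minimal sketch of the same check (directory path taken from the log; the helper name and extension set are our assumption about libcni's behavior, not an official API):

```python
# Hedged sketch of the readiness check behind the repeated log message:
# is there any loadable CNI config file in the directory the runtime watches?
from pathlib import Path

CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")  # directory named in the log

def cni_configured(conf_dir: Path) -> bool:
    exts = {".conf", ".conflist", ".json"}  # extensions libcni is known to load
    return any(p.suffix in exts for p in conf_dir.glob("*") if p.is_file())

print("CNI configured:", cni_configured(CNI_CONF_DIR))
```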
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.245950 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.246764 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.245966 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:16 crc kubenswrapper[4734]: E1125 09:29:16.246928 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:16 crc kubenswrapper[4734]: I1125 09:29:16.246118 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:16 crc kubenswrapper[4734]: E1125 09:29:16.246991 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:16 crc kubenswrapper[4734]: E1125 09:29:16.247120 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:16 crc kubenswrapper[4734]: E1125 09:29:16.247909 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:17 crc kubenswrapper[4734]: I1125 09:29:17.073015 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:17 crc kubenswrapper[4734]: I1125 09:29:17.073129 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:17 crc kubenswrapper[4734]: I1125 09:29:17.073142 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:17 crc kubenswrapper[4734]: I1125 09:29:17.073159 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:17 crc kubenswrapper[4734]: I1125 09:29:17.073170 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:17Z","lastTransitionTime":"2025-11-25T09:29:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:17.999999 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.000052 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.000068 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.000113 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.000132 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.102570 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.102623 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.102644 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.102665 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.102679 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.205927 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.205986 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.205999 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.206019 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.206031 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.246002 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.246060 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.245966 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:18 crc kubenswrapper[4734]: E1125 09:29:18.246206 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.246289 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:18 crc kubenswrapper[4734]: E1125 09:29:18.246402 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:18 crc kubenswrapper[4734]: E1125 09:29:18.246479 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:18 crc kubenswrapper[4734]: E1125 09:29:18.246545 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.309284 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.309328 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.309337 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.309351 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.309361 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.413522 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.413578 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.413589 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.413608 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.413620 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.520968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.521060 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.522002 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.522041 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.522055 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.624335 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.624372 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.624384 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.624400 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.624413 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.726372 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.726416 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.726428 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.726444 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.726455 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.829717 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.829781 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.829791 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.829808 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.829819 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.934027 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.934140 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.934164 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.934186 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:18 crc kubenswrapper[4734]: I1125 09:29:18.934204 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:18Z","lastTransitionTime":"2025-11-25T09:29:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.036846 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.036903 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.036921 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.036946 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.036963 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.139781 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.139842 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.139861 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.139885 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.139902 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.242319 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.242383 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.242405 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.242426 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.242441 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.345514 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.345604 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.345629 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.345662 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.345686 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.448302 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.448345 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.448356 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.448372 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.448384 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.551177 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.551259 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.551277 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.551294 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.551305 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.653869 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.653914 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.653926 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.653943 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.653956 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.756421 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.756459 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.756468 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.756482 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.756491 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.860621 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.860685 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.860698 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.860719 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.860731 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.963272 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.963529 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.963544 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.963570 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:19 crc kubenswrapper[4734]: I1125 09:29:19.963585 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:19Z","lastTransitionTime":"2025-11-25T09:29:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.066770 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.066829 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.066848 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.066876 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.066895 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.170317 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.170367 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.170381 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.170398 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.170409 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.245893 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.246016 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.246096 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.246167 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.246310 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.246374 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.246431 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.246505 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.264539 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.273162 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.273187 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.273194 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.273208 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.273217 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.275220 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.290992 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.303315 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"2025-11-25T09:28:25+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4\\\\n2025-11-25T09:28:25+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4 to /host/opt/cni/bin/\\\\n2025-11-25T09:28:25Z [verbose] multus-daemon started\\\\n2025-11-25T09:28:25Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:29:10Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.306266 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.306305 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.306317 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.306334 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.306344 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.316863 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-
11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.317117 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.320994 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.321029 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.321040 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.321055 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.321066 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.329608 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.332591 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.336967 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.336998 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.337009 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.337031 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.337042 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.342320 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c
5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.349449 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.354935 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.354981 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.354995 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.355012 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.355026 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.366893 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.368112 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.372994 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.373035 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.373047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.373065 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.373094 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.382246 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.386294 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: E1125 09:29:20.386449 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.392419 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.392945 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.393044 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.393174 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.393290 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.393877 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.416542 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.429235 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.449151 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.464876 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.481680 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.496073 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.496403 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.498448 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.498751 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.498853 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.499238 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.511605 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:20Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.601424 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.601702 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.601779 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.601839 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.601901 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.704587 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.704650 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.704671 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.704696 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.704715 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.807948 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.807988 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.808000 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.808015 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.808025 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.910664 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.910713 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.910724 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.910742 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:20 crc kubenswrapper[4734]: I1125 09:29:20.910752 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:20Z","lastTransitionTime":"2025-11-25T09:29:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.013547 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.013875 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.013969 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.014065 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.014241 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.117159 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.117211 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.117223 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.117243 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.117255 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.220061 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.220119 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.220131 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.220148 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.220162 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.322874 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.322905 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.322917 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.322932 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.322942 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.425289 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.425345 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.425361 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.425383 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.425399 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.527564 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.527616 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.527626 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.527643 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.527655 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.630191 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.630223 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.630233 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.630247 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.630257 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.732043 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.732106 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.732119 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.732144 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.732155 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.834993 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.835043 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.835061 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.835107 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.835124 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.937319 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.937365 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.937377 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.937394 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:21 crc kubenswrapper[4734]: I1125 09:29:21.937405 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:21Z","lastTransitionTime":"2025-11-25T09:29:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.040544 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.040614 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.040624 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.040643 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.040654 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.142850 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.142905 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.142920 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.142938 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.142950 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.245714 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.245778 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.245793 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.245818 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.245840 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.246071 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.246155 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:22 crc kubenswrapper[4734]: E1125 09:29:22.246201 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.246074 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:22 crc kubenswrapper[4734]: E1125 09:29:22.246268 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.246157 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:22 crc kubenswrapper[4734]: E1125 09:29:22.246357 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:22 crc kubenswrapper[4734]: E1125 09:29:22.246430 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.349397 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.350075 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.350275 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.350434 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.350632 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.453857 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.453900 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.453910 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.453925 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.453934 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.556410 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.556478 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.556503 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.556536 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.556560 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.658742 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.658771 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.658780 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.658792 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.658800 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.761051 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.761112 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.761123 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.761143 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.761155 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.863420 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.863455 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.863467 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.863482 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.863492 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.967208 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.967243 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.967255 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.967433 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:22 crc kubenswrapper[4734]: I1125 09:29:22.967447 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:22Z","lastTransitionTime":"2025-11-25T09:29:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.070405 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.070456 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.070472 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.070487 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.070498 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.173679 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.173739 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.173756 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.173778 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.173793 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.275849 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.275918 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.275939 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.275962 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.275978 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.388954 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.389032 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.389059 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.389119 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.389142 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.491800 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.491853 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.491864 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.491880 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.491893 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.594843 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.594917 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.594931 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.594949 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.594961 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.696822 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.696856 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.696867 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.696882 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.696892 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.800591 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.800660 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.800678 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.800702 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:23 crc kubenswrapper[4734]: I1125 09:29:23.800722 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:23Z","lastTransitionTime":"2025-11-25T09:29:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
...
Nov 25 09:29:24 crc kubenswrapper[4734]: I1125 09:29:24.246450 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:24 crc kubenswrapper[4734]: I1125 09:29:24.246593 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:24 crc kubenswrapper[4734]: E1125 09:29:24.246807 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:24 crc kubenswrapper[4734]: I1125 09:29:24.247112 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:24 crc kubenswrapper[4734]: I1125 09:29:24.247170 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:24 crc kubenswrapper[4734]: E1125 09:29:24.247121 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:24 crc kubenswrapper[4734]: E1125 09:29:24.247319 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:24 crc kubenswrapper[4734]: E1125 09:29:24.247385 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
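The five status events above repeat on roughly a 100 ms cadence for as long as the CNI check fails, which makes the raw capture hard to scan; deduplicated counts per message are usually all you need. A minimal sketch in Python, assuming the kubenswrapper line shape seen above; the kubelet.log path is a placeholder:

    import re
    from collections import Counter

    # Shape of the kubenswrapper lines above: syslog prefix, then a klog header
    # (severity I/W/E + MMDD, wall time, pid, source file:line) and the message.
    KLOG = re.compile(
        r'^(?P<day>\w{3} \d+ \d{2}:\d{2}:\d{2}) crc kubenswrapper\[\d+\]: '
        r'(?P<sev>[IWE])\d{4} (?P<time>\d{2}:\d{2}:\d{2}\.\d{6}) \d+ '
        r'(?P<src>[\w.]+:\d+)\] (?P<msg>.*)$'
    )
    # Heartbeat timestamps embedded in the condition JSON; strip them so the
    # otherwise-identical "Node became not ready" entries collapse together.
    TIMESTAMP = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z')

    def summarize(path="kubelet.log"):  # placeholder path
        counts, first, last = Counter(), {}, {}
        with open(path) as fh:
            for line in fh:
                m = KLOG.match(line.rstrip("\n"))
                if not m:
                    continue
                key = (m["sev"], m["src"], TIMESTAMP.sub("<ts>", m["msg"]))
                counts[key] += 1
                first.setdefault(key, m["time"])
                last[key] = m["time"]
        for (sev, src, msg), n in counts.most_common(20):
            span = f'{first[(sev, src, msg)]}..{last[(sev, src, msg)]}'
            print(f"{n:5}x [{sev}] {src} {span}  {msg[:100]}")

    if __name__ == "__main__":
        summarize()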
...
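Every elided cycle above has the same root cause: the kubelet's network readiness probe finds nothing under /etc/kubernetes/cni/net.d/ until the cluster's network plugin (Multus with OVN-Kubernetes on OpenShift) writes its configuration there. For reference, a hedged sketch of the generic CNI conflist shape that satisfies that probe on a scratch node; the bridge and host-local values are illustrative placeholders, not what the OpenShift operator actually generates:

    import json, pathlib

    # Illustrative only: on OpenShift the network operator generates the real
    # config. A hand-written conflist like this is just the generic way the
    # kubelet's NetworkReady probe gets satisfied on a throwaway node.
    CNI_DIR = pathlib.Path("/etc/kubernetes/cni/net.d")  # dir named in the log
    conflist = {
        "cniVersion": "0.4.0",
        "name": "scratch-bridge",          # hypothetical network name
        "plugins": [
            {
                "type": "bridge",          # reference CNI plugin
                "bridge": "cni0",
                "isGateway": True,
                "ipMasq": True,
                "ipam": {
                    "type": "host-local",
                    "subnet": "10.88.0.0/16",  # placeholder pod CIDR
                },
            }
        ],
    }

    CNI_DIR.mkdir(parents=True, exist_ok=True)  # needs root on a real node
    (CNI_DIR / "10-scratch-bridge.conflist").write_text(json.dumps(conflist, indent=2))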
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.120359 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.120523 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.120606 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.120739 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:30.120713586 +0000 UTC m=+152.931175600 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.120760 4734 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.120789 4734 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.120817 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:30:30.120803778 +0000 UTC m=+152.931265772 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.120841 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:30:30.120825909 +0000 UTC m=+152.931287913 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
...
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.220997 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.221064 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221210 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221235 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221247 4734 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221304 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:30:30.221288151 +0000 UTC m=+153.031750145 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221311 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221351 4734 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221375 4734 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.221460 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:30:30.221435736 +0000 UTC m=+153.031897770 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.248273 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.248398 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.248436 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.248481 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:26 crc kubenswrapper[4734]: I1125 09:29:26.248451 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.248582 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.248732 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:26 crc kubenswrapper[4734]: E1125 09:29:26.248901 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
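Each mount failure above is parked by nestedpendingoperations with exponential backoff, and every volume here has already reached the 1m4s step, so nothing retries before 09:30:30. A small sketch that pulls the blocked volumes and their retry deadlines out of a capture like this one; the regex follows the exact wording above and kubelet.log is a placeholder path:

    import re

    # 'Operation for "{volumeName:... podName:... nodeName:}" failed.
    #  No retries permitted until <deadline> ... (durationBeforeRetry <d>).
    #  Error: <reason>' -- wording as in the entries above.
    RETRY = re.compile(
        r'Operation for "\{volumeName:(?P<volume>[^ ]+) podName:(?P<pod>[^ ]*) nodeName:\}" failed\. '
        r'No retries permitted until (?P<until>[\d-]+ [\d:.]+) [^(]*\(durationBeforeRetry (?P<backoff>\S+)\)\. '
        r'Error: (?P<reason>.*)'
    )

    def blocked_volumes(path="kubelet.log"):  # placeholder path
        seen = {}
        with open(path) as fh:
            for line in fh:
                m = RETRY.search(line)
                if m:
                    # keep the latest deadline recorded per volume
                    seen[m["volume"]] = (m["until"], m["backoff"], m["reason"][:100])
        for volume, (until, backoff, reason) in sorted(seen.items()):
            print(f"{volume}\n  retry at {until} (backoff {backoff}): {reason}")
        return seen

    if __name__ == "__main__":
        blocked_volumes()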
...
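The cycles elided above keep the node's Ready condition False through 09:29:28; once a CNI config appears, the same setters.go path flips it back to True. A quick way to watch that condition from outside the node, assuming the official kubernetes Python client is installed and a kubeconfig reaches this cluster:

    from kubernetes import client, config

    def node_ready(name="crc"):
        # Reads the same Ready condition that setters.go is writing in the log above.
        config.load_kube_config()
        node = client.CoreV1Api().read_node(name)
        for cond in node.status.conditions:
            if cond.type == "Ready":
                print(f"{name}: Ready={cond.status} reason={cond.reason}")
                print(f"  {cond.message}")
                return cond.status == "True"
        return False

    if __name__ == "__main__":
        node_ready()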
Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.218746 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.218795 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.218804 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.218820 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.218831 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.246551 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.246555 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.246592 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.246774 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:28 crc kubenswrapper[4734]: E1125 09:29:28.246815 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:28 crc kubenswrapper[4734]: E1125 09:29:28.246934 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:28 crc kubenswrapper[4734]: E1125 09:29:28.247016 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:28 crc kubenswrapper[4734]: E1125 09:29:28.247118 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.247667 4734 scope.go:117] "RemoveContainer" containerID="646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.260340 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.321340 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.321376 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.321387 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.321404 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.321416 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.424432 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.424485 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.424497 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.424514 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.424527 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.528266 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.528337 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.528348 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.528373 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.528393 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.631124 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.631213 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.631227 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.631249 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.631263 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.734161 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.734515 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.734527 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.734541 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.734552 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.837978 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.838023 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.838033 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.838047 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.838057 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.893676 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/2.log" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.896497 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.897037 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.912503 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.926911 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.938840 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.939781 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.939817 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.939827 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.939843 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.939854 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:28Z","lastTransitionTime":"2025-11-25T09:29:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.954443 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.969449 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.982558 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:28 crc kubenswrapper[4734]: I1125 09:29:28.996422 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.012388 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.023238 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.031738 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.042268 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.042317 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.042334 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.042358 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.042377 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.042757 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.059642 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"2025-11-25T09:28:25+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4\\\\n2025-11-25T09:28:25+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4 to /host/opt/cni/bin/\\\\n2025-11-25T09:28:25Z [verbose] multus-daemon started\\\\n2025-11-25T09:28:25Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:29:10Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.073061 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 
09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.093379 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.107812 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.126575 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"135e2695-c0c1-46f2-baf2-94b1e8018024\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46afdcaa43f5bc131b67fd12490703381f281179a070573c34a6334c5a75b684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318
bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.143317 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f836
2f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\
\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.144584 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.144615 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.144623 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.144636 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.144660 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.207687 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.246605 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.246667 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.246684 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.246912 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.246930 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.349307 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.349344 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.349353 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.349368 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.349377 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.451701 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.451751 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.451762 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.451777 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.451792 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.553740 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.553788 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.553800 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.553816 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.553829 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.655892 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.655930 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.655950 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.655967 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.655978 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.759472 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.759533 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.759546 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.759562 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.759573 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.862448 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.862513 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.862536 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.862565 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.862583 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.965502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.965565 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.965588 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.965618 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:29 crc kubenswrapper[4734]: I1125 09:29:29.965639 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:29Z","lastTransitionTime":"2025-11-25T09:29:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.069116 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.069178 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.069191 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.069211 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.069224 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.171950 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.172005 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.172018 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.172036 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.172047 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.247380 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.247382 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.247654 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.247789 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.247993 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.248129 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.248505 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.248653 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.267751 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.274723 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.274788 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.274811 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.274834 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.274856 4734 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.285660 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.298327 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.312009 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.324067 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.336792 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.355319 4734 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"2025-11-25T09:28:25+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4\\\\n2025-11-25T09:28:25+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4 to /host/opt/cni/bin/\\\\n2025-11-25T09:28:25Z [verbose] multus-daemon started\\\\n2025-11-25T09:28:25Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:29:10Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.371190 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 
09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.378066 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.378158 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.378175 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.378202 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.378220 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.384981 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.399515 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.413310 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.427323 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"135e2695-c0c1-46f2-baf2-94b1e8018024\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46afdcaa43f5bc131b67fd12490703381f281179a070573c34a6334c5a75b684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.445550 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.469677 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.481560 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.481629 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.481642 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.481664 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.481678 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.485007 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.498976 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.513895 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.530512 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.574830 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.574894 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.574910 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.574932 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" 
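The status-patch failures above all share one root cause: every patch is rejected by the pod.network-node-identity.openshift.io webhook because its serving certificate's validity window ended at 2025-08-24T17:21:41Z, while the node clock reads 2025-11-25T09:29:30Z. The exact wording "x509: certificate has expired or is not yet valid: current time ... is after ..." is emitted by Go's crypto/x509 verification when time.Now() falls outside the certificate's NotBefore/NotAfter window. The following is a minimal standalone Go sketch of that check — it is not OpenShift or kubelet code, and the certificate path is a hypothetical placeholder — useful for confirming which certificate on a node has lapsed:

// certcheck.go — minimal sketch (assumption: you point it at a PEM-encoded
// certificate, e.g. the webhook's serving cert; the path below is hypothetical).
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	data, err := os.ReadFile("/tmp/webhook-serving.crt") // hypothetical path
	if err != nil {
		fmt.Fprintln(os.Stderr, "read:", err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse:", err)
		os.Exit(1)
	}
	now := time.Now()
	fmt.Printf("NotBefore: %s\nNotAfter:  %s\nNow:       %s\n",
		cert.NotBefore.Format(time.RFC3339),
		cert.NotAfter.Format(time.RFC3339),
		now.Format(time.RFC3339))
	// Same comparison x509.Certificate.Verify performs before returning a
	// CertificateInvalidError with Reason == x509.Expired, which is what
	// produces the error text quoted in the log records above.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Println("certificate has expired or is not yet valid")
		os.Exit(2)
	}
	fmt.Println("certificate is within its validity window")
}

Run against the webhook's serving certificate on this host, the sketch should report a NotAfter of 2025-08-24T17:21:41Z, matching the deadline quoted in every rejected patch; the NodeNotReady / "no CNI configuration file in /etc/kubernetes/cni/net.d/" records that follow are downstream symptoms of ovnkube-controller failing while the webhook rejects its status updates.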
Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.574946 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.590156 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 
2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.594244 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.594295 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.594308 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.594327 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.594341 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.610679 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 
2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.614810 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.614881 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.614901 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.614926 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.614941 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.632388 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 
2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.636531 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.636581 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.636601 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.636630 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.636648 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.654024 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 
2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.659039 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.659099 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.659109 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.659124 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.659133 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.677286 4734 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"313cb2d5-19b9-400d-8416-99d9919180d4\\\",\\\"systemUUID\\\":\\\"19343fc1-91cb-4eae-8f56-eacf25f0be5a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 
2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.677413 4734 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.679662 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.679749 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.679761 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.679782 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.679794 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.782497 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.782548 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.782557 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.782576 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.782589 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.884542 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.884591 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.884600 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.884614 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.884622 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.906586 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/3.log" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.908128 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/2.log" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.910964 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" exitCode=1 Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.911027 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.911102 4734 scope.go:117] "RemoveContainer" containerID="646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.914911 4734 scope.go:117] "RemoveContainer" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" Nov 25 09:29:30 crc kubenswrapper[4734]: E1125 09:29:30.915423 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.931103 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://761aaa27f3e85f2a16f54f5303b87f7bfb69f449895d9a962e8318ef41b62998\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.946553 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-sqcpf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4ce04403-9506-4775-83ce-62ced0a6f576\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://066ef115ebba18aad79e0d76a61340b61b08221ebf91f397c603aa3eb56c3f98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p5k7x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-sqcpf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.961205 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.973193 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-qhhvk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d67566fa-8990-4e98-93f5-b43f2bada700\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24c8680f1b2345e7788019e0492011256eb37dbf7d0419accd11fed8b48f7912\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-gzws6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-qhhvk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.984062 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a9dd9f275f748136700042c8cc5262e21f8635e9aca2a070a17f98fd59217e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\
\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4wbjj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2n2f8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.987383 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.987435 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.987448 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.987469 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.987481 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:30Z","lastTransitionTime":"2025-11-25T09:29:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:30 crc kubenswrapper[4734]: I1125 09:29:30.998196 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-7t7mh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"80259512-c4ac-4362-b21e-386796e31645\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:11Z\\\",\\\"message\\\":\\\"2025-11-25T09:28:25+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4\\\\n2025-11-25T09:28:25+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_54d6d6d2-dc7d-4e1a-a2b8-e9dadc5199e4 to /host/opt/cni/bin/\\\\n2025-11-25T09:28:25Z [verbose] multus-daemon started\\\\n2025-11-25T09:28:25Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:29:10Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lhfm8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-7t7mh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:30Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.011373 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"83557809-7515-4cc3-afab-e940ed4b823f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://137d2092090faf3f351e73c5258f62cdd5c286d712781461476a9f1cf51d0d2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bd92fbe0f0ca60221482c77d0b048bd8633418d477a2583c1f99fb2eb2548323\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qks2b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:35Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-5xjzg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 
09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.028625 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"54363663-3559-4203-bf8f-03e3bf4d1127\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xj7s9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-tfr8m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.046056 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41c030ca-eff4-40a1-b521-ff0082855777\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0c363ec4fe901b1dd903fca10aeba043fa654232f663fef398bd7aa6c46bdcd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5ddb477bc4dd7cbeb195ca7edaa75ee7295672b52d253b00421baaa450792c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b644f14f3c0d9c88619dceecf9a143746cd619fb1bbe7225f822774afed2370e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://147c66b2df95acf04104d43bc9c73fedaf9641bf04a3a085df62d47e01592a3d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.061613 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://836f5b652b78d5630e2cbf4983ef6f1ffa82f7274674312d17bb9544eef3a460\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0aa9ced09e86d585a319db670a99a5e952ed9e389f71835bcbd5727c7f16dbf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.077472 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"135e2695-c0c1-46f2-baf2-94b1e8018024\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46afdcaa43f5bc131b67fd12490703381f281179a070573c34a6334c5a75b684\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3bd284eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bd28
4eafdbd12b20eaec6caa60cc2dc2f174370be30643a23cf25b23721bfea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.089874 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.089911 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.089921 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.089935 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.089946 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.091187 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4da62509-e117-444b-9f78-c5c9e52b1b87\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46101fc89761da5f3b33a6e352621692098081ba17c9e85260af0e7f4ec5105a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eea440eadf7e1985ecbfb889248c419edfab04f73bc480a4ffb98a850ec4d306\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e952dfe3952457ec534e79351a9d3241c3bff0f7692129def99bc3b67e400db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da7fa13260c46496d5d7f306cc7a239830607a51b7ba18fd2fc151527540a1a1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca14b39f5d48e9573e627500ca7f1a3e5814458275023ea2ab3c7bd35af8d396\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ac41cbbbda6d49b3afebad25f05910045a872e42d5a44c7af27d6866e814bed7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://11f79a05bc7311295e99dd2b9677ead95c7c607f9e0a92e30e7d516bc61bec5e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bshnh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jg2nq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.112751 4734 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://646b43d490a1eb253c98f142c65830d1cb75006c85fe9700c2351f75f1129682\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:28:57Z\\\",\\\"message\\\":\\\"le-plugin\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.246\\\\\\\", Port:9443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:28:57.298461 6496 services_controller.go:445] Built service openshift-machine-api/machine-api-operator LB template configs for network=default: []services.lbConfig(nil)\\\\nI1125 09:28:57.298467 6496 services_controller.go:452] Built service openshift-network-console/networking-console-plugin per-node LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298479 6496 services_controller.go:453] Built service openshift-network-console/networking-console-plugin template LB for network=default: []services.LB{}\\\\nI1125 09:28:57.298488 6496 services_controller.go:454] Service openshift-network-console/networking-console-plugin for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nF1125 09:28:57.298489 6496 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:29:30Z\\\",\\\"message\\\":\\\"h\\\\nI1125 09:29:29.588881 6928 obj_retry.go:365] Adding new 
object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:29:29.588884 6928 base_network_controller_pods.go:477] [default/openshift-network-console/networking-console-plugin-85b44fc459-gdk6g] creating logical port openshift-network-console_networking-console-plugin-85b44fc459-gdk6g for pod on switch crc\\\\nI1125 09:29:29.588886 6928 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-7t7mh in node crc\\\\nI1125 09:29:29.588896 6928 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-scheduler/openshift-kube-scheduler-crc\\\\nI1125 09:29:29.588803 6928 obj_retry.go:365] Adding new object: *v1.Pod openshift-machine-config-operator/kube-rbac-proxy-crio-crc\\\\nF1125 09:29:29.588806 6928 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.ne\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:29:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e\\\",\\\"image\\\":\\\"quay.io/openshift-release-de
v/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9fvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-2vvjj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.124350 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f5ee61e-049f-432a-8205-180bbd86fc76\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://38854837f3e83bc3ee8476d28fa2e351a31a3315ac4bdab866c8322ef6aa6262\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ac06b663f0292c0129009247e6b2af3ef9dd8b61d4fb1c821c0c513357df102\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ded866fc0c97ef53e1fa8af0a60764fc1afc3c498d9dd960518b2be1d6f8e58f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.137677 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.151152 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.164521 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6f695112-1be5-4918-9796-79c5f6cf7855\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:28:22Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 09:28:21.865725 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 09:28:21.865937 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:28:21.866991 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3183708157/tls.crt::/tmp/serving-cert-3183708157/tls.key\\\\\\\"\\\\nI1125 09:28:22.259163 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 09:28:22.261953 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 09:28:22.262538 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 09:28:22.262573 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 09:28:22.262580 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 09:28:22.269070 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 09:28:22.269130 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269137 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 09:28:22.269144 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 09:28:22.269149 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 09:28:22.269153 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 09:28:22.269157 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 09:28:22.269489 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 09:28:22.274795 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:28:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:28:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:28:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.177366 4734 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:28:23Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a5dc72b55dbccfa971011c7b43f22a6a52eab5dc653cb06d49e4f13c9157895\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:28:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:29:31Z is after 2025-08-24T17:21:41Z" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.192126 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.192155 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.192163 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.192175 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.192184 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.294982 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.295034 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.295046 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.295065 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.295077 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.396777 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.396825 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.396838 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.396853 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.396864 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.499220 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.499261 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.499270 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.499302 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.499312 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.602199 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.602257 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.602275 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.602300 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.602317 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.705839 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.705915 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.705939 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.705968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.705988 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.809290 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.809409 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.809427 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.809503 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.809558 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.913226 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.913309 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.913327 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.913351 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.913367 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:31Z","lastTransitionTime":"2025-11-25T09:29:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:31 crc kubenswrapper[4734]: I1125 09:29:31.918691 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/3.log" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.015648 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.015797 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.015816 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.015838 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.015912 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.118921 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.118968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.118980 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.118997 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.119008 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.221715 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.221781 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.221802 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.221834 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.221859 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.247008 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.247067 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.247195 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.247041 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:32 crc kubenswrapper[4734]: E1125 09:29:32.247300 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:32 crc kubenswrapper[4734]: E1125 09:29:32.247438 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:32 crc kubenswrapper[4734]: E1125 09:29:32.247591 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:32 crc kubenswrapper[4734]: E1125 09:29:32.247756 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.325485 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.325568 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.325593 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.325622 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.325644 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.428509 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.428559 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.428573 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.428592 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.428607 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.531980 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.532030 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.532045 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.532061 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.532072 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.634007 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.634055 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.634069 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.634102 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.634111 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.736896 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.736951 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.736964 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.736980 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.736992 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.841238 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.841304 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.841323 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.841349 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.841370 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.944344 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.944398 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.944410 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.944427 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:32 crc kubenswrapper[4734]: I1125 09:29:32.944436 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:32Z","lastTransitionTime":"2025-11-25T09:29:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.048645 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.048731 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.048757 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.048790 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.048815 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.152198 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.152249 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.152264 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.152312 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.152325 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.254966 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.255023 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.255037 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.255056 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.255070 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.358294 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.358378 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.358391 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.358414 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.358431 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.461652 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.461715 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.461731 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.461753 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.461765 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.564864 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.564915 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.564928 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.564948 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.564960 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.667995 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.668044 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.668056 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.668074 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.668129 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.771313 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.771373 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.771389 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.771412 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.771426 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.874674 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.874739 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.874752 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.874771 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.874783 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.977112 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.977156 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.977167 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.977183 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:33 crc kubenswrapper[4734]: I1125 09:29:33.977195 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:33Z","lastTransitionTime":"2025-11-25T09:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.079296 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.079334 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.079343 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.079358 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.079367 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.181636 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.181715 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.181724 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.181738 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.181748 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.246127 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.246179 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:34 crc kubenswrapper[4734]: E1125 09:29:34.246366 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.246436 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.246446 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:34 crc kubenswrapper[4734]: E1125 09:29:34.246693 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:34 crc kubenswrapper[4734]: E1125 09:29:34.246802 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:34 crc kubenswrapper[4734]: E1125 09:29:34.246868 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.284114 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.284151 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.284160 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.284199 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.284217 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.387537 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.387611 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.387627 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.387652 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.387668 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.490651 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.490701 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.490746 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.490765 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.490775 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.595624 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.595694 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.595704 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.595719 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.595755 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.698727 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.698798 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.698817 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.698841 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.698858 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.801266 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.801306 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.801315 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.801331 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.801340 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.903446 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.903797 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.904039 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.904302 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:34 crc kubenswrapper[4734]: I1125 09:29:34.904474 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:34Z","lastTransitionTime":"2025-11-25T09:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.007587 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.007665 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.007678 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.007700 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.007718 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.113287 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.113347 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.113366 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.113395 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.113415 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.216525 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.216566 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.216580 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.216598 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.216611 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.319033 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.319125 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.319148 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.319172 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.319190 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.421193 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.421270 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.421293 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.421323 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.421346 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.524053 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.524116 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.524127 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.524145 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.524156 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.626598 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.626683 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.626694 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.626710 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.626722 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.729645 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.729693 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.729722 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.729737 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.729745 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.832681 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.832737 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.832753 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.832776 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.832790 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.936979 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.937030 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.937044 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.937062 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:35 crc kubenswrapper[4734]: I1125 09:29:35.937075 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:35Z","lastTransitionTime":"2025-11-25T09:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.042534 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.042946 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.042958 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.042996 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.043008 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.147235 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.147284 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.147293 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.147309 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.147318 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.246372 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.246530 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.246880 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:36 crc kubenswrapper[4734]: E1125 09:29:36.246874 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:36 crc kubenswrapper[4734]: E1125 09:29:36.247006 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:36 crc kubenswrapper[4734]: E1125 09:29:36.247171 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.247239 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:36 crc kubenswrapper[4734]: E1125 09:29:36.247353 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.249675 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.249719 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.249733 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.249755 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.249771 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.352476 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.352528 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.352540 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.352558 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.352571 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.455162 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.455210 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.455221 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.455238 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.455249 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.557938 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.557973 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.557982 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.558012 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.558022 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.661163 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.661249 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.661276 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.661304 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.661324 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.765804 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.765866 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.765889 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.765919 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.765940 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.869015 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.869054 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.869064 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.869079 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.869109 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.972317 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.972452 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.972476 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.972507 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:36 crc kubenswrapper[4734]: I1125 09:29:36.972528 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:36Z","lastTransitionTime":"2025-11-25T09:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.076039 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.076161 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.076181 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.076202 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.076218 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.179572 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.179644 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.179664 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.179691 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.179710 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.283304 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.283383 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.283396 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.283413 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.283426 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.386202 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.386233 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.386241 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.386254 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.386264 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.488880 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.488929 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.488940 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.488956 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.488968 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.591905 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.591960 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.591975 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.591995 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.592010 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.694461 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.694542 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.694581 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.694603 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.694615 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.797108 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.797148 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.797162 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.797180 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.797195 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.900755 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.900839 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.900863 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.900894 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:37 crc kubenswrapper[4734]: I1125 09:29:37.900913 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:37Z","lastTransitionTime":"2025-11-25T09:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.003502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.003583 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.003598 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.003622 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.003636 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.107152 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.107209 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.107224 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.107248 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.107263 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.210259 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.210314 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.210324 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.210344 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.210357 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.246925 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.247102 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.247154 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:38 crc kubenswrapper[4734]: E1125 09:29:38.247315 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.247403 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:38 crc kubenswrapper[4734]: E1125 09:29:38.247627 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:38 crc kubenswrapper[4734]: E1125 09:29:38.248076 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:38 crc kubenswrapper[4734]: E1125 09:29:38.248198 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.314178 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.314227 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.314237 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.314253 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.314267 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.417144 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.417195 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.417207 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.417226 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.417239 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.519867 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.519932 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.519942 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.519968 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.519980 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.622614 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.622669 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.622680 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.622699 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.622715 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.726615 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.726666 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.726678 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.726697 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.726708 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.829368 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.829412 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.829424 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.829440 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.829450 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.931827 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.932137 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.932375 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.932545 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:38 crc kubenswrapper[4734]: I1125 09:29:38.932692 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:38Z","lastTransitionTime":"2025-11-25T09:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.035324 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.035606 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.035686 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.035766 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.035885 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.138848 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.138904 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.138917 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.138935 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.138947 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.241638 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.241680 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.241689 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.241701 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.241710 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.270874 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.349783 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.349847 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.349859 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.349877 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.349888 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.458305 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.458437 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.458502 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.458541 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.458577 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.561820 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.561901 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.561915 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.561935 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.561969 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.667454 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.667528 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.667548 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.667574 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.667596 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.770499 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.770533 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.770541 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.770554 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.770562 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.873546 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.873615 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.873626 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.873647 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.873660 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.977979 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.978062 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.978074 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.978121 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:39 crc kubenswrapper[4734]: I1125 09:29:39.978138 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:39Z","lastTransitionTime":"2025-11-25T09:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.081841 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.081905 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.081917 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.081934 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.081950 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.184519 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.184586 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.184602 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.184623 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.184637 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.246774 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.246828 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:40 crc kubenswrapper[4734]: E1125 09:29:40.246989 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.247071 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:40 crc kubenswrapper[4734]: E1125 09:29:40.247216 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:40 crc kubenswrapper[4734]: E1125 09:29:40.247428 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.247459 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:40 crc kubenswrapper[4734]: E1125 09:29:40.247622 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.289049 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.289167 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.289183 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.289226 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.289246 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.305745 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-qhhvk" podStartSLOduration=78.305729699 podStartE2EDuration="1m18.305729699s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.287244372 +0000 UTC m=+103.097706376" watchObservedRunningTime="2025-11-25 09:29:40.305729699 +0000 UTC m=+103.116191693" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.328267 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podStartSLOduration=78.328237545 podStartE2EDuration="1m18.328237545s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.306045618 +0000 UTC m=+103.116507612" watchObservedRunningTime="2025-11-25 09:29:40.328237545 +0000 UTC m=+103.138699579" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.328788 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-7t7mh" podStartSLOduration=78.328772971 podStartE2EDuration="1m18.328772971s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.327767451 +0000 UTC m=+103.138229445" watchObservedRunningTime="2025-11-25 09:29:40.328772971 +0000 UTC m=+103.139235005" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 
09:29:40.362753 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-5xjzg" podStartSLOduration=77.362732626 podStartE2EDuration="1m17.362732626s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.34868592 +0000 UTC m=+103.159147934" watchObservedRunningTime="2025-11-25 09:29:40.362732626 +0000 UTC m=+103.173194620" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.392128 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.392171 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.392182 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.392197 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.392206 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.396889 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=48.396868796 podStartE2EDuration="48.396868796s" podCreationTimestamp="2025-11-25 09:28:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.382419748 +0000 UTC m=+103.192881742" watchObservedRunningTime="2025-11-25 09:29:40.396868796 +0000 UTC m=+103.207330790" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.409586 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=12.409571182 podStartE2EDuration="12.409571182s" podCreationTimestamp="2025-11-25 09:29:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.409109538 +0000 UTC m=+103.219571532" watchObservedRunningTime="2025-11-25 09:29:40.409571182 +0000 UTC m=+103.220033186" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.428698 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-jg2nq" podStartSLOduration=78.428681817 podStartE2EDuration="1m18.428681817s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.427567194 +0000 UTC m=+103.238029188" watchObservedRunningTime="2025-11-25 09:29:40.428681817 +0000 UTC m=+103.239143811" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 
09:29:40.483500 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=1.483483939 podStartE2EDuration="1.483483939s" podCreationTimestamp="2025-11-25 09:29:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.482535881 +0000 UTC m=+103.292997875" watchObservedRunningTime="2025-11-25 09:29:40.483483939 +0000 UTC m=+103.293945933" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.493970 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.494013 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.494026 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.494042 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.494055 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.502651 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=73.502632555 podStartE2EDuration="1m13.502632555s" podCreationTimestamp="2025-11-25 09:28:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.502587734 +0000 UTC m=+103.313049728" watchObservedRunningTime="2025-11-25 09:29:40.502632555 +0000 UTC m=+103.313094569" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.551502 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=78.551482841 podStartE2EDuration="1m18.551482841s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.550630446 +0000 UTC m=+103.361092440" watchObservedRunningTime="2025-11-25 09:29:40.551482841 +0000 UTC m=+103.361944835" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.596836 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.596879 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.596888 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.596901 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 
09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.596911 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.599284 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-sqcpf" podStartSLOduration=78.599267275 podStartE2EDuration="1m18.599267275s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:40.598944625 +0000 UTC m=+103.409406639" watchObservedRunningTime="2025-11-25 09:29:40.599267275 +0000 UTC m=+103.409729269" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.608391 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:40 crc kubenswrapper[4734]: E1125 09:29:40.608528 4734 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:29:40 crc kubenswrapper[4734]: E1125 09:29:40.608593 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs podName:54363663-3559-4203-bf8f-03e3bf4d1127 nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.60857926 +0000 UTC m=+167.419041254 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs") pod "network-metrics-daemon-tfr8m" (UID: "54363663-3559-4203-bf8f-03e3bf4d1127") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.699320 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.699361 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.699376 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.699391 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.699401 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.801373 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.801460 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.801472 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.801489 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.801760 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.905850 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.905935 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.905958 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.905991 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.906017 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.996212 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.996298 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.996313 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.996339 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:29:40 crc kubenswrapper[4734]: I1125 09:29:40.996354 4734 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:29:40Z","lastTransitionTime":"2025-11-25T09:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.062378 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg"] Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.063139 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.067149 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.067906 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.067958 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.067956 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.214776 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fcc86090-582f-40ba-9753-0c52c21cd553-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.214846 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc86090-582f-40ba-9753-0c52c21cd553-service-ca\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.214907 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fcc86090-582f-40ba-9753-0c52c21cd553-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.214953 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc86090-582f-40ba-9753-0c52c21cd553-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.215068 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fcc86090-582f-40ba-9753-0c52c21cd553-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316246 4734 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fcc86090-582f-40ba-9753-0c52c21cd553-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316355 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fcc86090-582f-40ba-9753-0c52c21cd553-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316438 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc86090-582f-40ba-9753-0c52c21cd553-service-ca\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316497 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fcc86090-582f-40ba-9753-0c52c21cd553-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316514 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fcc86090-582f-40ba-9753-0c52c21cd553-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316588 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fcc86090-582f-40ba-9753-0c52c21cd553-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.316662 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fcc86090-582f-40ba-9753-0c52c21cd553-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.317565 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fcc86090-582f-40ba-9753-0c52c21cd553-service-ca\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.324161 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/fcc86090-582f-40ba-9753-0c52c21cd553-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.340936 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fcc86090-582f-40ba-9753-0c52c21cd553-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-6jttg\" (UID: \"fcc86090-582f-40ba-9753-0c52c21cd553\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.382009 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" Nov 25 09:29:41 crc kubenswrapper[4734]: W1125 09:29:41.393759 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfcc86090_582f_40ba_9753_0c52c21cd553.slice/crio-7e1b6c52eb03ab65e8ff564a07cc739151344461160e83648894ea8ea1bae451 WatchSource:0}: Error finding container 7e1b6c52eb03ab65e8ff564a07cc739151344461160e83648894ea8ea1bae451: Status 404 returned error can't find the container with id 7e1b6c52eb03ab65e8ff564a07cc739151344461160e83648894ea8ea1bae451 Nov 25 09:29:41 crc kubenswrapper[4734]: I1125 09:29:41.964448 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" event={"ID":"fcc86090-582f-40ba-9753-0c52c21cd553","Type":"ContainerStarted","Data":"7e1b6c52eb03ab65e8ff564a07cc739151344461160e83648894ea8ea1bae451"} Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.245930 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.246033 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:42 crc kubenswrapper[4734]: E1125 09:29:42.246150 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.246165 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.245941 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:42 crc kubenswrapper[4734]: E1125 09:29:42.246315 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:42 crc kubenswrapper[4734]: E1125 09:29:42.246380 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:42 crc kubenswrapper[4734]: E1125 09:29:42.246627 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.248217 4734 scope.go:117] "RemoveContainer" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" Nov 25 09:29:42 crc kubenswrapper[4734]: E1125 09:29:42.248452 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.970548 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" event={"ID":"fcc86090-582f-40ba-9753-0c52c21cd553","Type":"ContainerStarted","Data":"a5292cb206626349073c52702962e245dbc9b65be4cc7dad152d7cfd0dc8e5fc"} Nov 25 09:29:42 crc kubenswrapper[4734]: I1125 09:29:42.992567 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6jttg" podStartSLOduration=80.992536931 podStartE2EDuration="1m20.992536931s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:29:42.991783489 +0000 UTC m=+105.802245523" watchObservedRunningTime="2025-11-25 09:29:42.992536931 +0000 UTC m=+105.802998965" Nov 25 09:29:44 crc kubenswrapper[4734]: I1125 09:29:44.246339 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:44 crc kubenswrapper[4734]: I1125 09:29:44.246453 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:44 crc kubenswrapper[4734]: E1125 09:29:44.246568 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:44 crc kubenswrapper[4734]: I1125 09:29:44.246586 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:44 crc kubenswrapper[4734]: I1125 09:29:44.246607 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:44 crc kubenswrapper[4734]: E1125 09:29:44.246718 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:44 crc kubenswrapper[4734]: E1125 09:29:44.246850 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:44 crc kubenswrapper[4734]: E1125 09:29:44.246983 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:46 crc kubenswrapper[4734]: I1125 09:29:46.246172 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:46 crc kubenswrapper[4734]: I1125 09:29:46.246218 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:46 crc kubenswrapper[4734]: I1125 09:29:46.246252 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:46 crc kubenswrapper[4734]: E1125 09:29:46.246390 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:46 crc kubenswrapper[4734]: E1125 09:29:46.246537 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:46 crc kubenswrapper[4734]: E1125 09:29:46.246743 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:46 crc kubenswrapper[4734]: I1125 09:29:46.247126 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:46 crc kubenswrapper[4734]: E1125 09:29:46.247295 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:48 crc kubenswrapper[4734]: I1125 09:29:48.246331 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:48 crc kubenswrapper[4734]: I1125 09:29:48.246355 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:48 crc kubenswrapper[4734]: E1125 09:29:48.247148 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:48 crc kubenswrapper[4734]: I1125 09:29:48.246550 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:48 crc kubenswrapper[4734]: I1125 09:29:48.246502 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:48 crc kubenswrapper[4734]: E1125 09:29:48.247277 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:48 crc kubenswrapper[4734]: E1125 09:29:48.247212 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:48 crc kubenswrapper[4734]: E1125 09:29:48.247425 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:50 crc kubenswrapper[4734]: I1125 09:29:50.246128 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:50 crc kubenswrapper[4734]: I1125 09:29:50.246193 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:50 crc kubenswrapper[4734]: I1125 09:29:50.246286 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:50 crc kubenswrapper[4734]: E1125 09:29:50.248334 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:50 crc kubenswrapper[4734]: I1125 09:29:50.248414 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:50 crc kubenswrapper[4734]: E1125 09:29:50.248473 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:50 crc kubenswrapper[4734]: E1125 09:29:50.248622 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:50 crc kubenswrapper[4734]: E1125 09:29:50.248714 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:52 crc kubenswrapper[4734]: I1125 09:29:52.247288 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:52 crc kubenswrapper[4734]: I1125 09:29:52.247319 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:52 crc kubenswrapper[4734]: I1125 09:29:52.247356 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:52 crc kubenswrapper[4734]: E1125 09:29:52.247424 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:52 crc kubenswrapper[4734]: I1125 09:29:52.247593 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:52 crc kubenswrapper[4734]: E1125 09:29:52.247874 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:52 crc kubenswrapper[4734]: E1125 09:29:52.247917 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:52 crc kubenswrapper[4734]: E1125 09:29:52.248011 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:54 crc kubenswrapper[4734]: I1125 09:29:54.246833 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:29:54 crc kubenswrapper[4734]: E1125 09:29:54.247004 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:29:54 crc kubenswrapper[4734]: I1125 09:29:54.246832 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:54 crc kubenswrapper[4734]: E1125 09:29:54.247152 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:29:54 crc kubenswrapper[4734]: I1125 09:29:54.247208 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:54 crc kubenswrapper[4734]: E1125 09:29:54.247262 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:29:54 crc kubenswrapper[4734]: I1125 09:29:54.247310 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:54 crc kubenswrapper[4734]: E1125 09:29:54.247369 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:29:55 crc kubenswrapper[4734]: I1125 09:29:55.247196 4734 scope.go:117] "RemoveContainer" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" Nov 25 09:29:55 crc kubenswrapper[4734]: E1125 09:29:55.247364 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:29:56 crc kubenswrapper[4734]: I1125 09:29:56.246717 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:29:56 crc kubenswrapper[4734]: I1125 09:29:56.247126 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:29:56 crc kubenswrapper[4734]: I1125 09:29:56.247150 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:29:56 crc kubenswrapper[4734]: E1125 09:29:56.247413 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:56 crc kubenswrapper[4734]: E1125 09:29:56.247549 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:56 crc kubenswrapper[4734]: E1125 09:29:56.247707 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:56 crc kubenswrapper[4734]: I1125 09:29:56.247883 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:56 crc kubenswrapper[4734]: E1125 09:29:56.248024 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.025741 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/1.log"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.026438 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/0.log"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.026513 4734 generic.go:334] "Generic (PLEG): container finished" podID="80259512-c4ac-4362-b21e-386796e31645" containerID="babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08" exitCode=1
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.026561 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerDied","Data":"babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08"}
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.026613 4734 scope.go:117] "RemoveContainer" containerID="748c24695faec3deb8ce02f97e62601bcf2dfcb6cc75d54021abe1ef39f758bc"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.027200 4734 scope.go:117] "RemoveContainer" containerID="babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08"
Nov 25 09:29:58 crc kubenswrapper[4734]: E1125 09:29:58.027455 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-7t7mh_openshift-multus(80259512-c4ac-4362-b21e-386796e31645)\"" pod="openshift-multus/multus-7t7mh" podUID="80259512-c4ac-4362-b21e-386796e31645"
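The two CrashLoopBackOff messages in this stretch quote different delays: 10s for kube-multus just above, and 40s for ovnkube-controller at 09:29:55. That spacing is consistent with kubelet's restart back-off, which by upstream default (not recorded in this log, so treat the constants as an assumption) starts at 10s and doubles per failed restart up to a 5-minute cap. A minimal sketch:

    BASE_S, CAP_S = 10, 300  # assumed kubelet defaults: 10 s base, 5 min cap

    def crashloop_delay(restart: int) -> int:
        """Back-off, in seconds, before restart attempt `restart` (1-based)."""
        return min(BASE_S * 2 ** (restart - 1), CAP_S)

    # restart 1 -> 10 s (kube-multus above), 3 -> 40 s (ovnkube-controller),
    # and from restart 6 onward the delay stays pinned at 300 s.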
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.246192 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.246310 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.246402 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:29:58 crc kubenswrapper[4734]: I1125 09:29:58.246428 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:29:58 crc kubenswrapper[4734]: E1125 09:29:58.246886 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:29:58 crc kubenswrapper[4734]: E1125 09:29:58.246993 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:29:58 crc kubenswrapper[4734]: E1125 09:29:58.247097 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:29:58 crc kubenswrapper[4734]: E1125 09:29:58.247131 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:29:59 crc kubenswrapper[4734]: I1125 09:29:59.031909 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/1.log"
Nov 25 09:30:00 crc kubenswrapper[4734]: E1125 09:30:00.213748 4734 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
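"Node not becoming ready in time after startup" follows directly from the repeated NetworkReady=false condition: the container runtime (CRI-O here, via its ocicni layer) keeps reporting the network plugin not ready until a CNI config exists under /etc/kubernetes/cni/net.d/, which ovnkube-controller only writes once it is running. A rough sketch of that readiness condition, assuming the check amounts to finding a loadable config file in that directory (the real logic lives in the runtime, not kubelet, so this is only an approximation):

    import os

    CNI_CONF_DIR = "/etc/kubernetes/cni/net.d"  # directory named in the log message

    def cni_network_ready(conf_dir: str = CNI_CONF_DIR) -> bool:
        # Approximation: NetworkReady flips to true once any CNI config
        # (.conf/.conflist/.json) can be found and parsed in conf_dir.
        try:
            return any(name.endswith((".conf", ".conflist", ".json"))
                       for name in os.listdir(conf_dir))
        except FileNotFoundError:
            return False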
Nov 25 09:30:00 crc kubenswrapper[4734]: I1125 09:30:00.246538 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:30:00 crc kubenswrapper[4734]: E1125 09:30:00.247506 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:30:00 crc kubenswrapper[4734]: I1125 09:30:00.247527 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:30:00 crc kubenswrapper[4734]: I1125 09:30:00.247558 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:30:00 crc kubenswrapper[4734]: E1125 09:30:00.247596 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:30:00 crc kubenswrapper[4734]: I1125 09:30:00.247599 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m"
Nov 25 09:30:00 crc kubenswrapper[4734]: E1125 09:30:00.247734 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:30:00 crc kubenswrapper[4734]: E1125 09:30:00.247902 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127"
Nov 25 09:30:00 crc kubenswrapper[4734]: E1125 09:30:00.369100 4734 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:30:02 crc kubenswrapper[4734]: I1125 09:30:02.246364 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:30:02 crc kubenswrapper[4734]: I1125 09:30:02.246363 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:30:02 crc kubenswrapper[4734]: I1125 09:30:02.246457 4734 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:02 crc kubenswrapper[4734]: I1125 09:30:02.246492 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:02 crc kubenswrapper[4734]: E1125 09:30:02.246591 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:02 crc kubenswrapper[4734]: E1125 09:30:02.246835 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:02 crc kubenswrapper[4734]: E1125 09:30:02.247007 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:02 crc kubenswrapper[4734]: E1125 09:30:02.247119 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:04 crc kubenswrapper[4734]: I1125 09:30:04.246793 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:04 crc kubenswrapper[4734]: I1125 09:30:04.246743 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:04 crc kubenswrapper[4734]: I1125 09:30:04.246911 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:04 crc kubenswrapper[4734]: E1125 09:30:04.246936 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:04 crc kubenswrapper[4734]: I1125 09:30:04.247107 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:04 crc kubenswrapper[4734]: E1125 09:30:04.247194 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:04 crc kubenswrapper[4734]: E1125 09:30:04.247303 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:04 crc kubenswrapper[4734]: E1125 09:30:04.247452 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:05 crc kubenswrapper[4734]: E1125 09:30:05.371013 4734 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:30:06 crc kubenswrapper[4734]: I1125 09:30:06.246362 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:06 crc kubenswrapper[4734]: I1125 09:30:06.246489 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:06 crc kubenswrapper[4734]: I1125 09:30:06.246392 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:06 crc kubenswrapper[4734]: E1125 09:30:06.246592 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:06 crc kubenswrapper[4734]: E1125 09:30:06.246796 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:06 crc kubenswrapper[4734]: I1125 09:30:06.246944 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:06 crc kubenswrapper[4734]: E1125 09:30:06.247069 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:06 crc kubenswrapper[4734]: E1125 09:30:06.247219 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:06 crc kubenswrapper[4734]: I1125 09:30:06.248343 4734 scope.go:117] "RemoveContainer" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" Nov 25 09:30:06 crc kubenswrapper[4734]: E1125 09:30:06.248611 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-2vvjj_openshift-ovn-kubernetes(cbdcaaef-9e1d-421f-b1fa-05223f0067af)\"" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" Nov 25 09:30:08 crc kubenswrapper[4734]: I1125 09:30:08.246423 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:08 crc kubenswrapper[4734]: I1125 09:30:08.246496 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:08 crc kubenswrapper[4734]: I1125 09:30:08.246524 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:08 crc kubenswrapper[4734]: E1125 09:30:08.246632 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:08 crc kubenswrapper[4734]: I1125 09:30:08.246647 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:08 crc kubenswrapper[4734]: E1125 09:30:08.246754 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:08 crc kubenswrapper[4734]: E1125 09:30:08.246900 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:08 crc kubenswrapper[4734]: E1125 09:30:08.246994 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:10 crc kubenswrapper[4734]: I1125 09:30:10.246528 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:10 crc kubenswrapper[4734]: E1125 09:30:10.246693 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:10 crc kubenswrapper[4734]: I1125 09:30:10.246722 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:10 crc kubenswrapper[4734]: I1125 09:30:10.246814 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:10 crc kubenswrapper[4734]: E1125 09:30:10.248304 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:10 crc kubenswrapper[4734]: I1125 09:30:10.248337 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:10 crc kubenswrapper[4734]: E1125 09:30:10.248497 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:10 crc kubenswrapper[4734]: E1125 09:30:10.248544 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:10 crc kubenswrapper[4734]: E1125 09:30:10.372152 4734 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:30:12 crc kubenswrapper[4734]: I1125 09:30:12.246995 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:12 crc kubenswrapper[4734]: I1125 09:30:12.246996 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:12 crc kubenswrapper[4734]: I1125 09:30:12.247145 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:12 crc kubenswrapper[4734]: I1125 09:30:12.247220 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:12 crc kubenswrapper[4734]: E1125 09:30:12.247257 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:12 crc kubenswrapper[4734]: E1125 09:30:12.247339 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:12 crc kubenswrapper[4734]: E1125 09:30:12.247390 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:12 crc kubenswrapper[4734]: E1125 09:30:12.247544 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:13 crc kubenswrapper[4734]: I1125 09:30:13.247452 4734 scope.go:117] "RemoveContainer" containerID="babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08" Nov 25 09:30:14 crc kubenswrapper[4734]: I1125 09:30:14.081439 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/1.log" Nov 25 09:30:14 crc kubenswrapper[4734]: I1125 09:30:14.081758 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerStarted","Data":"c0ee7992858b9fd7a10962667478f845a8b192cef15d0cf18988e653d1d097ac"} Nov 25 09:30:14 crc kubenswrapper[4734]: I1125 09:30:14.246632 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:14 crc kubenswrapper[4734]: I1125 09:30:14.246631 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:14 crc kubenswrapper[4734]: I1125 09:30:14.246631 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:14 crc kubenswrapper[4734]: I1125 09:30:14.246649 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:14 crc kubenswrapper[4734]: E1125 09:30:14.246759 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:14 crc kubenswrapper[4734]: E1125 09:30:14.246964 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:14 crc kubenswrapper[4734]: E1125 09:30:14.247018 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:14 crc kubenswrapper[4734]: E1125 09:30:14.247071 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:15 crc kubenswrapper[4734]: E1125 09:30:15.373505 4734 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:30:16 crc kubenswrapper[4734]: I1125 09:30:16.246857 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:16 crc kubenswrapper[4734]: I1125 09:30:16.246932 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:16 crc kubenswrapper[4734]: I1125 09:30:16.247010 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:16 crc kubenswrapper[4734]: I1125 09:30:16.247018 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:16 crc kubenswrapper[4734]: E1125 09:30:16.247070 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:16 crc kubenswrapper[4734]: E1125 09:30:16.247254 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:16 crc kubenswrapper[4734]: E1125 09:30:16.247384 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:16 crc kubenswrapper[4734]: E1125 09:30:16.247487 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:18 crc kubenswrapper[4734]: I1125 09:30:18.246683 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:18 crc kubenswrapper[4734]: I1125 09:30:18.246933 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:18 crc kubenswrapper[4734]: I1125 09:30:18.246955 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:18 crc kubenswrapper[4734]: E1125 09:30:18.247041 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:18 crc kubenswrapper[4734]: I1125 09:30:18.247114 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:18 crc kubenswrapper[4734]: E1125 09:30:18.247122 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:18 crc kubenswrapper[4734]: E1125 09:30:18.247193 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:18 crc kubenswrapper[4734]: E1125 09:30:18.247280 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:18 crc kubenswrapper[4734]: I1125 09:30:18.247514 4734 scope.go:117] "RemoveContainer" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" Nov 25 09:30:19 crc kubenswrapper[4734]: I1125 09:30:19.099538 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/3.log" Nov 25 09:30:19 crc kubenswrapper[4734]: I1125 09:30:19.101636 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerStarted","Data":"43fd484a9960544cf74472150ca51b7c7efdc7a3c46fe5c2f855fde6e0cc5b04"} Nov 25 09:30:19 crc kubenswrapper[4734]: I1125 09:30:19.102046 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:30:19 crc kubenswrapper[4734]: I1125 09:30:19.130278 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podStartSLOduration=117.130258352 podStartE2EDuration="1m57.130258352s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:19.129415097 +0000 UTC m=+141.939877091" watchObservedRunningTime="2025-11-25 09:30:19.130258352 +0000 UTC m=+141.940720346" Nov 25 09:30:19 crc kubenswrapper[4734]: I1125 09:30:19.516347 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-tfr8m"] Nov 25 09:30:19 crc kubenswrapper[4734]: I1125 09:30:19.516461 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:19 crc kubenswrapper[4734]: E1125 09:30:19.516568 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:20 crc kubenswrapper[4734]: I1125 09:30:20.246521 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:20 crc kubenswrapper[4734]: I1125 09:30:20.246533 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:20 crc kubenswrapper[4734]: I1125 09:30:20.247632 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:20 crc kubenswrapper[4734]: E1125 09:30:20.247630 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:20 crc kubenswrapper[4734]: E1125 09:30:20.247810 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:20 crc kubenswrapper[4734]: E1125 09:30:20.248221 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:20 crc kubenswrapper[4734]: E1125 09:30:20.374174 4734 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:30:21 crc kubenswrapper[4734]: I1125 09:30:21.246212 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:21 crc kubenswrapper[4734]: E1125 09:30:21.246338 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:22 crc kubenswrapper[4734]: I1125 09:30:22.246525 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:22 crc kubenswrapper[4734]: I1125 09:30:22.246538 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:22 crc kubenswrapper[4734]: I1125 09:30:22.247010 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:22 crc kubenswrapper[4734]: E1125 09:30:22.247201 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:22 crc kubenswrapper[4734]: E1125 09:30:22.247330 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:22 crc kubenswrapper[4734]: E1125 09:30:22.247466 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:23 crc kubenswrapper[4734]: I1125 09:30:23.246268 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:23 crc kubenswrapper[4734]: E1125 09:30:23.246400 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:24 crc kubenswrapper[4734]: I1125 09:30:24.246864 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:24 crc kubenswrapper[4734]: I1125 09:30:24.246925 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:24 crc kubenswrapper[4734]: E1125 09:30:24.246997 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:30:24 crc kubenswrapper[4734]: I1125 09:30:24.247107 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:24 crc kubenswrapper[4734]: E1125 09:30:24.247251 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:30:24 crc kubenswrapper[4734]: E1125 09:30:24.247322 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:30:25 crc kubenswrapper[4734]: I1125 09:30:25.246348 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:25 crc kubenswrapper[4734]: E1125 09:30:25.246595 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-tfr8m" podUID="54363663-3559-4203-bf8f-03e3bf4d1127" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.246151 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.246199 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.246212 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.247987 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.248209 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.249132 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:30:26 crc kubenswrapper[4734]: I1125 09:30:26.251269 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 09:30:27 crc kubenswrapper[4734]: I1125 09:30:27.245944 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:27 crc kubenswrapper[4734]: I1125 09:30:27.248474 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 09:30:27 crc kubenswrapper[4734]: I1125 09:30:27.248494 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.172970 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:30 crc kubenswrapper[4734]: E1125 09:30:30.173062 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:32:32.173042874 +0000 UTC m=+274.983504868 (durationBeforeRetry 2m2s). 
Nov 25 09:30:30 crc kubenswrapper[4734]: E1125 09:30:30.173062 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:32:32.173042874 +0000 UTC m=+274.983504868 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.173351 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.173387 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.174455 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.179130 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.274032 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.274118 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.277062 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.464309 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.470372 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:30:30 crc kubenswrapper[4734]: I1125 09:30:30.475053 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:30:30 crc kubenswrapper[4734]: W1125 09:30:30.700935 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-9662e02db3734a5f14566a01f2ed98b93728d90792299dc58a0daf1b2a6b0d02 WatchSource:0}: Error finding container 9662e02db3734a5f14566a01f2ed98b93728d90792299dc58a0daf1b2a6b0d02: Status 404 returned error can't find the container with id 9662e02db3734a5f14566a01f2ed98b93728d90792299dc58a0daf1b2a6b0d02
Nov 25 09:30:30 crc kubenswrapper[4734]: W1125 09:30:30.965678 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-522ca31eacb4705751498ee6f5102ed8cdea0341230ea83f9287202e491b80d3 WatchSource:0}: Error finding container 522ca31eacb4705751498ee6f5102ed8cdea0341230ea83f9287202e491b80d3: Status 404 returned error can't find the container with id 522ca31eacb4705751498ee6f5102ed8cdea0341230ea83f9287202e491b80d3
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.136257 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3c7962356f0d284fbdb3143206eae34ffb493dc2a59331a2cf5e8cb1897fb598"}
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.136592 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"522ca31eacb4705751498ee6f5102ed8cdea0341230ea83f9287202e491b80d3"}
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.136764 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.137960 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"8c4d8e0228442310c5e2fda146d930eae42d6ea0369063e6459c9005102e34e3"}
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.138001 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9662e02db3734a5f14566a01f2ed98b93728d90792299dc58a0daf1b2a6b0d02"}
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.139326 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"d684647b378c12a4fbafd968581e9e8d6069ba546ae3e57d974a6dd7567295b8"}
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.139365 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8679d3b25404725102d96e22235a4cb7d67828c5300c3f0bfc46d9a30ae3cef9"}
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.869764 4734 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.915891 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b78xz"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.916812 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.918193 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-64gk2"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.918944 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.921048 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.921761 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.921911 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.922403 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.923752 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.924036 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.949177 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-dcwsq"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.950268 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-xtsd4"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.963555 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.964296 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.967587 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-dqksf"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.968182 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.968535 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.969010 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.969127 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-dqksf"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.969616 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.970143 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.970345 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.970596 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.970751 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.971055 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.971280 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.971473 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.971709 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.971922 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.972187 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.972395 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.972596 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.973634 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.976979 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977158 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977280 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977381 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977487 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977664 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977857 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.977898 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978054 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.979021 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-7tm4l"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978076 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.979174 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978137 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978321 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978307 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978354 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.979450 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978386 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978398 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978454 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978515 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.979549 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.979615 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978500 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978573 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.978573 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.980137 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.980227 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xzqtm"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.980248 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.980937 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.981661 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-6m6pm"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.982403 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-6m6pm"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.983032 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.983443 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.983884 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8"]
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.985210 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.992874 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993143 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993155 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993248 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993370 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993498 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993605 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993671 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993773 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993848 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993865 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993936 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993971 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994019 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993608 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994154 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993883 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994249 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994103 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994158 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.993775 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994809 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.994986 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.995244 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.996249 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 25 09:30:31 crc kubenswrapper[4734]: I1125 09:30:31.997016 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.001570 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.002967 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003021 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-etcd-serving-ca\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003045 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6hgf\" (UniqueName: \"kubernetes.io/projected/be35ecf5-83be-4063-9d6a-939bd1a78def-kube-api-access-g6hgf\") pod \"downloads-7954f5f757-dqksf\" (UID: \"be35ecf5-83be-4063-9d6a-939bd1a78def\") " pod="openshift-console/downloads-7954f5f757-dqksf"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003097 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvd6t\" (UniqueName: \"kubernetes.io/projected/b15a18b7-d1cb-4054-9e79-89e2681747f2-kube-api-access-fvd6t\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003123 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/68456817-e55c-47f3-a4d6-c1f11bac172c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6mckc\" (UID: \"68456817-e55c-47f3-a4d6-c1f11bac172c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003144 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003173 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz4dg\" (UniqueName: \"kubernetes.io/projected/b4504e14-e8ec-4fea-acff-0848c20861b0-kube-api-access-rz4dg\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003191 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b15a18b7-d1cb-4054-9e79-89e2681747f2-serving-cert\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003209 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-config\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003226 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.003226 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004276 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-node-pullsecrets\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004338 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-image-import-ca\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004364 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-encryption-config\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004405 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221a9776-8306-4dda-b262-3152d9bb212e-config\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004439 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-etcd-client\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004478 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd882e25-5875-43ed-9d01-34b92fe44587-audit-dir\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004521 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4442p\" (UniqueName: \"kubernetes.io/projected/221a9776-8306-4dda-b262-3152d9bb212e-kube-api-access-4442p\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004558 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjjbn\" (UniqueName: \"kubernetes.io/projected/692575a0-8869-436e-b912-838ad302c4bd-kube-api-access-zjjbn\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004642 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-client-ca\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004679 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/221a9776-8306-4dda-b262-3152d9bb212e-machine-approver-tls\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 
09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004730 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-config\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004772 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/221a9776-8306-4dda-b262-3152d9bb212e-auth-proxy-config\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004807 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-config\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004845 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004890 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004940 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-images\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.004997 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-config\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005041 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc 
kubenswrapper[4734]: I1125 09:30:32.005176 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005217 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/30dd5c51-7a6e-4571-b3ae-c33150488286-audit-dir\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005245 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005301 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-encryption-config\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005331 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-serving-cert\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005366 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005402 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/692575a0-8869-436e-b912-838ad302c4bd-config\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005429 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khvh2\" (UniqueName: \"kubernetes.io/projected/fd882e25-5875-43ed-9d01-34b92fe44587-kube-api-access-khvh2\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005457 
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005457 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-audit\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005486 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-audit-dir\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005520 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k28mz\" (UniqueName: \"kubernetes.io/projected/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-kube-api-access-k28mz\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005554 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-audit-policies\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005588 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv6gr\" (UniqueName: \"kubernetes.io/projected/68456817-e55c-47f3-a4d6-c1f11bac172c-kube-api-access-rv6gr\") pod \"cluster-samples-operator-665b6dd947-6mckc\" (UID: \"68456817-e55c-47f3-a4d6-c1f11bac172c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005619 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-serving-cert\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005648 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005716 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005790 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005817 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-etcd-client\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005847 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-client-ca\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005864 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/692575a0-8869-436e-b912-838ad302c4bd-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005892 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cxlr\" (UniqueName: \"kubernetes.io/projected/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-kube-api-access-2cxlr\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005849 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005916 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-serving-cert\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005951 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005974 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jnsz\" (UniqueName: \"kubernetes.io/projected/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-kube-api-access-6jnsz\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.005990 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006010 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006032 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4504e14-e8ec-4fea-acff-0848c20861b0-serving-cert\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006048 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cz6c8\" (UniqueName: \"kubernetes.io/projected/30dd5c51-7a6e-4571-b3ae-c33150488286-kube-api-access-cz6c8\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006063 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-audit-policies\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006095 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006112 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4"
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.006112 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt"]
Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.009175 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.011282 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.011586 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.061336 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.062620 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.068866 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.069225 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.069532 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.088152 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.088532 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.089821 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.089898 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090164 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090219 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090330 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090397 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090477 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090553 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090596 4734 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090712 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.090817 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.091204 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.091467 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.091809 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092023 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092040 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092226 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092259 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092290 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092298 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092213 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092453 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092469 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092486 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.092776 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.094134 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.094278 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 
09:30:32.094336 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.094821 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.095669 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.096059 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.096071 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.097214 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.102520 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.103204 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.103550 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.103688 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.103551 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.107978 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jnsz\" (UniqueName: \"kubernetes.io/projected/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-kube-api-access-6jnsz\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108026 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108051 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c14528b-b8c0-4b6d-90c8-95904704d096-console-serving-cert\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108077 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108113 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3c1294ee-c0f8-487b-9639-1281c3b687ed-proxy-tls\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108175 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3326f-6c05-47b4-852f-00ca08c502d1-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108210 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-service-ca-bundle\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108244 4734 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd6c09ef-f524-4ca9-9104-f506b6784087-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108274 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4504e14-e8ec-4fea-acff-0848c20861b0-serving-cert\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108309 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cz6c8\" (UniqueName: \"kubernetes.io/projected/30dd5c51-7a6e-4571-b3ae-c33150488286-kube-api-access-cz6c8\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108333 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-audit-policies\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108357 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108379 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108402 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108432 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-etcd-serving-ca\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108457 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6hgf\" (UniqueName: 
\"kubernetes.io/projected/be35ecf5-83be-4063-9d6a-939bd1a78def-kube-api-access-g6hgf\") pod \"downloads-7954f5f757-dqksf\" (UID: \"be35ecf5-83be-4063-9d6a-939bd1a78def\") " pod="openshift-console/downloads-7954f5f757-dqksf" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108482 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvd6t\" (UniqueName: \"kubernetes.io/projected/b15a18b7-d1cb-4054-9e79-89e2681747f2-kube-api-access-fvd6t\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108505 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/68456817-e55c-47f3-a4d6-c1f11bac172c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6mckc\" (UID: \"68456817-e55c-47f3-a4d6-c1f11bac172c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108528 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108552 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-ca\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108575 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz4dg\" (UniqueName: \"kubernetes.io/projected/b4504e14-e8ec-4fea-acff-0848c20861b0-kube-api-access-rz4dg\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108596 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b15a18b7-d1cb-4054-9e79-89e2681747f2-serving-cert\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108621 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-console-config\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108645 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-config\") pod \"controller-manager-879f6c89f-b78xz\" (UID: 
\"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108668 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108691 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-node-pullsecrets\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108712 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-image-import-ca\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108748 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-encryption-config\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108770 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c14528b-b8c0-4b6d-90c8-95904704d096-console-oauth-config\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108794 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-config\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108825 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221a9776-8306-4dda-b262-3152d9bb212e-config\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108848 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-etcd-client\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108872 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/fd882e25-5875-43ed-9d01-34b92fe44587-audit-dir\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108898 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6llb\" (UniqueName: \"kubernetes.io/projected/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-kube-api-access-x6llb\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108949 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b18276dc-c893-45f6-a36d-37a8ae844715-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.108976 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4442p\" (UniqueName: \"kubernetes.io/projected/221a9776-8306-4dda-b262-3152d9bb212e-kube-api-access-4442p\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109004 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjjbn\" (UniqueName: \"kubernetes.io/projected/692575a0-8869-436e-b912-838ad302c4bd-kube-api-access-zjjbn\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109028 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npwrf\" (UniqueName: \"kubernetes.io/projected/62d3326f-6c05-47b4-852f-00ca08c502d1-kube-api-access-npwrf\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109063 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-client-ca\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109106 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/221a9776-8306-4dda-b262-3152d9bb212e-machine-approver-tls\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109129 4734 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb5xv\" (UniqueName: \"kubernetes.io/projected/3c14528b-b8c0-4b6d-90c8-95904704d096-kube-api-access-nb5xv\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109157 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd6c09ef-f524-4ca9-9104-f506b6784087-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109184 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-config\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109206 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22spz\" (UniqueName: \"kubernetes.io/projected/f502af72-4499-47f8-adbf-8c1fa6aeddf9-kube-api-access-22spz\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109229 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62d3326f-6c05-47b4-852f-00ca08c502d1-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109254 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/221a9776-8306-4dda-b262-3152d9bb212e-auth-proxy-config\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109280 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-trusted-ca-bundle\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109303 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-config\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109329 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109358 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109384 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-images\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109409 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b18276dc-c893-45f6-a36d-37a8ae844715-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109433 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-config\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109458 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109488 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109513 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-service-ca\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109535 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109559 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/30dd5c51-7a6e-4571-b3ae-c33150488286-audit-dir\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109585 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109609 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twgnp\" (UniqueName: \"kubernetes.io/projected/f544d759-f5c6-430f-984e-c0768447ae1d-kube-api-access-twgnp\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109631 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f544d759-f5c6-430f-984e-c0768447ae1d-serving-cert\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109656 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-encryption-config\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109679 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-serving-cert\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109702 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jl2r5\" (UniqueName: \"kubernetes.io/projected/3c1294ee-c0f8-487b-9639-1281c3b687ed-kube-api-access-jl2r5\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109703 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 
09:30:32.109723 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-config\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109746 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd6c09ef-f524-4ca9-9104-f506b6784087-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109770 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109794 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-client\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109818 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/692575a0-8869-436e-b912-838ad302c4bd-config\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109852 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khvh2\" (UniqueName: \"kubernetes.io/projected/fd882e25-5875-43ed-9d01-34b92fe44587-kube-api-access-khvh2\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109876 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-srv-cert\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109898 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-audit\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109919 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-service-ca\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109941 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-oauth-serving-cert\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.109978 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-audit-dir\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110001 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f502af72-4499-47f8-adbf-8c1fa6aeddf9-serving-cert\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110027 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k28mz\" (UniqueName: \"kubernetes.io/projected/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-kube-api-access-k28mz\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110067 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-audit-policies\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110114 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv6gr\" (UniqueName: \"kubernetes.io/projected/68456817-e55c-47f3-a4d6-c1f11bac172c-kube-api-access-rv6gr\") pod \"cluster-samples-operator-665b6dd947-6mckc\" (UID: \"68456817-e55c-47f3-a4d6-c1f11bac172c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110138 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-serving-cert\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110161 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 
09:30:32.110186 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110213 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110235 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-etcd-client\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110259 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-client-ca\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110280 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/692575a0-8869-436e-b912-838ad302c4bd-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110306 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3c1294ee-c0f8-487b-9639-1281c3b687ed-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110332 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110357 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cxlr\" (UniqueName: \"kubernetes.io/projected/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-kube-api-access-2cxlr\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110379 4734 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b18276dc-c893-45f6-a36d-37a8ae844715-config\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110401 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-serving-cert\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110426 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.110522 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221a9776-8306-4dda-b262-3152d9bb212e-config\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.112400 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.112513 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.114798 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-node-pullsecrets\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.114938 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.115008 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd882e25-5875-43ed-9d01-34b92fe44587-audit-dir\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.115800 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-audit-policies\") pod \"oauth-openshift-558db77b4-xtsd4\" 
(UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.115889 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-config\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.116468 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.116534 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.116608 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.116775 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-config\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.117340 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/221a9776-8306-4dda-b262-3152d9bb212e-auth-proxy-config\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.117704 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-audit-dir\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.118502 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/692575a0-8869-436e-b912-838ad302c4bd-config\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.120004 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.120524 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"audit\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-audit\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.122056 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-n7f6b"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.124494 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.125016 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2phng"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.125757 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/692575a0-8869-436e-b912-838ad302c4bd-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.126927 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-etcd-serving-ca\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.127276 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-audit-policies\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.127481 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-client-ca\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.128838 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.129112 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.129672 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.129917 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.129966 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.130070 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-client-ca\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.131433 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-image-import-ca\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.131801 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-serving-cert\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.132639 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-config\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.132948 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b15a18b7-d1cb-4054-9e79-89e2681747f2-serving-cert\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.133929 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/221a9776-8306-4dda-b262-3152d9bb212e-machine-approver-tls\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.134039 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.135771 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/30dd5c51-7a6e-4571-b3ae-c33150488286-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.136340 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.136350 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-etcd-client\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.136856 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-serving-cert\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.145179 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-images\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.141604 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.143985 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.144226 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-config\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.144267 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/30dd5c51-7a6e-4571-b3ae-c33150488286-audit-dir\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.144546 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.139957 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etcd-client\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-etcd-client\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.146407 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-serving-cert\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.146277 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.147074 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/68456817-e55c-47f3-a4d6-c1f11bac172c-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-6mckc\" (UID: \"68456817-e55c-47f3-a4d6-c1f11bac172c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.148403 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4504e14-e8ec-4fea-acff-0848c20861b0-serving-cert\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.150194 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.157739 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.158018 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.158328 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.159108 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-encryption-config\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.159544 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.165236 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pmwh4"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.166057 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-67tg6"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.166448 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.166533 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.167052 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.167351 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/30dd5c51-7a6e-4571-b3ae-c33150488286-encryption-config\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.167495 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.167538 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.168427 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: 
\"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.168991 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.169746 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.170220 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.183639 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.186136 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.194138 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.196337 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.196538 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.197295 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.200168 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.202207 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4qpf8"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.203110 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.207003 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b78xz"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.207395 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-pc9l6"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.208624 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.210310 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.211163 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.211961 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-srv-cert\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212005 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-service-ca\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212028 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-oauth-serving-cert\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212049 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f502af72-4499-47f8-adbf-8c1fa6aeddf9-serving-cert\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212132 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-apiservice-cert\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212160 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3c1294ee-c0f8-487b-9639-1281c3b687ed-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212176 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212198 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/b18276dc-c893-45f6-a36d-37a8ae844715-config\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212219 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/94b4f653-06ce-483a-9da8-9cf32064525e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nr7rm\" (UID: \"94b4f653-06ce-483a-9da8-9cf32064525e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212244 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c14528b-b8c0-4b6d-90c8-95904704d096-console-serving-cert\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212267 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3c1294ee-c0f8-487b-9639-1281c3b687ed-proxy-tls\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212286 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3326f-6c05-47b4-852f-00ca08c502d1-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212306 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-service-ca-bundle\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212323 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd6c09ef-f524-4ca9-9104-f506b6784087-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.212798 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-86g8w"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.214479 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-service-ca-bundle\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.214953 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/665eefbd-7f4c-485c-9822-f4f3cc1a67a5-metrics-tls\") pod \"dns-operator-744455d44c-2phng\" (UID: \"665eefbd-7f4c-485c-9822-f4f3cc1a67a5\") " pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216405 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgsvv\" (UniqueName: \"kubernetes.io/projected/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-kube-api-access-lgsvv\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.215006 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-oauth-serving-cert\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216010 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b18276dc-c893-45f6-a36d-37a8ae844715-config\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216051 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3326f-6c05-47b4-852f-00ca08c502d1-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216194 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216551 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-tmpfs\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216289 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.215006 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3c1294ee-c0f8-487b-9639-1281c3b687ed-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217040 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-ca\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217112 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-console-config\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217151 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c14528b-b8c0-4b6d-90c8-95904704d096-console-oauth-config\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217193 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217215 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-64gk2"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.216253 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd6c09ef-f524-4ca9-9104-f506b6784087-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217595 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217173 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-webhook-cert\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218487 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-config\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218517 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b18276dc-c893-45f6-a36d-37a8ae844715-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218542 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6llb\" (UniqueName: \"kubernetes.io/projected/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-kube-api-access-x6llb\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218614 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb5xv\" (UniqueName: \"kubernetes.io/projected/3c14528b-b8c0-4b6d-90c8-95904704d096-kube-api-access-nb5xv\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218637 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npwrf\" (UniqueName: \"kubernetes.io/projected/62d3326f-6c05-47b4-852f-00ca08c502d1-kube-api-access-npwrf\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.217954 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-ca\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218667 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd6c09ef-f524-4ca9-9104-f506b6784087-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218697 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff5kj\" (UniqueName: \"kubernetes.io/projected/665eefbd-7f4c-485c-9822-f4f3cc1a67a5-kube-api-access-ff5kj\") pod \"dns-operator-744455d44c-2phng\" (UID: \"665eefbd-7f4c-485c-9822-f4f3cc1a67a5\") " pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218723 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22spz\" (UniqueName: \"kubernetes.io/projected/f502af72-4499-47f8-adbf-8c1fa6aeddf9-kube-api-access-22spz\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218741 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62d3326f-6c05-47b4-852f-00ca08c502d1-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218765 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-trusted-ca-bundle\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218788 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dpq2\" (UniqueName: \"kubernetes.io/projected/94b4f653-06ce-483a-9da8-9cf32064525e-kube-api-access-7dpq2\") pod \"control-plane-machine-set-operator-78cbb6b69f-nr7rm\" (UID: \"94b4f653-06ce-483a-9da8-9cf32064525e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218819 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b18276dc-c893-45f6-a36d-37a8ae844715-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218848 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-service-ca\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218881 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218909 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twgnp\" (UniqueName: \"kubernetes.io/projected/f544d759-f5c6-430f-984e-c0768447ae1d-kube-api-access-twgnp\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218942 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jl2r5\" (UniqueName: \"kubernetes.io/projected/3c1294ee-c0f8-487b-9639-1281c3b687ed-kube-api-access-jl2r5\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218966 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-config\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.218989 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f544d759-f5c6-430f-984e-c0768447ae1d-serving-cert\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219018 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd6c09ef-f524-4ca9-9104-f506b6784087-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219048 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-client\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219241 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219401 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-console-config\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219660 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-service-ca\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " 
pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219755 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hf5p7"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.219978 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-config\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.220823 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.220964 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-service-ca\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.221061 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f502af72-4499-47f8-adbf-8c1fa6aeddf9-config\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.221559 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/3c14528b-b8c0-4b6d-90c8-95904704d096-console-oauth-config\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.221672 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.221745 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f544d759-f5c6-430f-984e-c0768447ae1d-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.222591 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3c14528b-b8c0-4b6d-90c8-95904704d096-trusted-ca-bundle\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.222680 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-mr42k"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.223626 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b18276dc-c893-45f6-a36d-37a8ae844715-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.223719 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.223926 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-7tm4l"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.224967 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bd6c09ef-f524-4ca9-9104-f506b6784087-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.225141 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f502af72-4499-47f8-adbf-8c1fa6aeddf9-etcd-client\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.225628 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62d3326f-6c05-47b4-852f-00ca08c502d1-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.225935 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f502af72-4499-47f8-adbf-8c1fa6aeddf9-serving-cert\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.226197 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f544d759-f5c6-430f-984e-c0768447ae1d-serving-cert\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.226332 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-dqksf"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.227513 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.229255 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/3c14528b-b8c0-4b6d-90c8-95904704d096-console-serving-cert\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.230313 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"] Nov 25 
09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.231740 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-dcwsq"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.233563 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.234985 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-6m6pm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.236733 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-xtsd4"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.238353 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.240040 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.241147 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.241528 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.248544 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-srv-cert\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.254499 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.254553 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xzqtm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.254746 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.256500 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.257575 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.259694 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.260715 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.263954 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt"] Nov 25 09:30:32 crc 
kubenswrapper[4734]: I1125 09:30:32.265651 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.266927 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.267893 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.270017 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.270076 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-m4rhm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.272106 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.274909 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-mr42k"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.276025 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2phng"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.277259 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-n7f6b"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.279881 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.280251 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-m4rhm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.281842 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hf5p7"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.286269 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-67tg6"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.288593 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.295510 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4qpf8"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.299514 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pmwh4"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.300019 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.301380 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg"] Nov 25 09:30:32 crc 
kubenswrapper[4734]: I1125 09:30:32.303192 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.304656 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-86g8w"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.306637 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.310925 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3c1294ee-c0f8-487b-9639-1281c3b687ed-proxy-tls\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.313120 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-kh9hk"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.314372 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.315134 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dc5dm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.317748 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dc5dm"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.318065 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.319411 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320122 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-apiservice-cert\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320179 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/94b4f653-06ce-483a-9da8-9cf32064525e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nr7rm\" (UID: \"94b4f653-06ce-483a-9da8-9cf32064525e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320221 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/665eefbd-7f4c-485c-9822-f4f3cc1a67a5-metrics-tls\") pod \"dns-operator-744455d44c-2phng\" (UID: \"665eefbd-7f4c-485c-9822-f4f3cc1a67a5\") " pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320274 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgsvv\" (UniqueName: \"kubernetes.io/projected/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-kube-api-access-lgsvv\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320304 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-tmpfs\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320402 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-webhook-cert\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320493 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff5kj\" (UniqueName: \"kubernetes.io/projected/665eefbd-7f4c-485c-9822-f4f3cc1a67a5-kube-api-access-ff5kj\") pod \"dns-operator-744455d44c-2phng\" (UID: \"665eefbd-7f4c-485c-9822-f4f3cc1a67a5\") " pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.320534 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dpq2\" (UniqueName: \"kubernetes.io/projected/94b4f653-06ce-483a-9da8-9cf32064525e-kube-api-access-7dpq2\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-nr7rm\" (UID: \"94b4f653-06ce-483a-9da8-9cf32064525e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.321314 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-tmpfs\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.339225 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.379947 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.384550 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/94b4f653-06ce-483a-9da8-9cf32064525e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-nr7rm\" (UID: \"94b4f653-06ce-483a-9da8-9cf32064525e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.399429 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.439944 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jnsz\" (UniqueName: \"kubernetes.io/projected/93b2feab-2bfb-4bf2-b63d-b8cd5253509f-kube-api-access-6jnsz\") pod \"apiserver-76f77b778f-64gk2\" (UID: \"93b2feab-2bfb-4bf2-b63d-b8cd5253509f\") " pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.456545 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvd6t\" (UniqueName: \"kubernetes.io/projected/b15a18b7-d1cb-4054-9e79-89e2681747f2-kube-api-access-fvd6t\") pod \"route-controller-manager-6576b87f9c-46nzm\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.482865 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cz6c8\" (UniqueName: \"kubernetes.io/projected/30dd5c51-7a6e-4571-b3ae-c33150488286-kube-api-access-cz6c8\") pod \"apiserver-7bbb656c7d-5jtcs\" (UID: \"30dd5c51-7a6e-4571-b3ae-c33150488286\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.495713 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4442p\" (UniqueName: \"kubernetes.io/projected/221a9776-8306-4dda-b262-3152d9bb212e-kube-api-access-4442p\") pod \"machine-approver-56656f9798-nw9pk\" (UID: \"221a9776-8306-4dda-b262-3152d9bb212e\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.511662 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.517284 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjjbn\" (UniqueName: \"kubernetes.io/projected/692575a0-8869-436e-b912-838ad302c4bd-kube-api-access-zjjbn\") pod \"openshift-apiserver-operator-796bbdcf4f-pxm7c\" (UID: \"692575a0-8869-436e-b912-838ad302c4bd\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.519829 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.526108 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-apiservice-cert\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.526127 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-webhook-cert\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.559791 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k28mz\" (UniqueName: \"kubernetes.io/projected/7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5-kube-api-access-k28mz\") pod \"machine-api-operator-5694c8668f-dcwsq\" (UID: \"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.578435 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz4dg\" (UniqueName: \"kubernetes.io/projected/b4504e14-e8ec-4fea-acff-0848c20861b0-kube-api-access-rz4dg\") pod \"controller-manager-879f6c89f-b78xz\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.582499 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.596600 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khvh2\" (UniqueName: \"kubernetes.io/projected/fd882e25-5875-43ed-9d01-34b92fe44587-kube-api-access-khvh2\") pod \"oauth-openshift-558db77b4-xtsd4\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.611018 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.616826 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv6gr\" (UniqueName: \"kubernetes.io/projected/68456817-e55c-47f3-a4d6-c1f11bac172c-kube-api-access-rv6gr\") pod \"cluster-samples-operator-665b6dd947-6mckc\" (UID: \"68456817-e55c-47f3-a4d6-c1f11bac172c\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.641365 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cxlr\" (UniqueName: \"kubernetes.io/projected/1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f-kube-api-access-2cxlr\") pod \"openshift-config-operator-7777fb866f-2jfg2\" (UID: \"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.659440 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.661299 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6hgf\" (UniqueName: \"kubernetes.io/projected/be35ecf5-83be-4063-9d6a-939bd1a78def-kube-api-access-g6hgf\") pod \"downloads-7954f5f757-dqksf\" (UID: \"be35ecf5-83be-4063-9d6a-939bd1a78def\") " pod="openshift-console/downloads-7954f5f757-dqksf" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.678420 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.681745 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.696481 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.708042 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.718178 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.720412 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.721243 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.739665 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.751938 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/665eefbd-7f4c-485c-9822-f4f3cc1a67a5-metrics-tls\") pod \"dns-operator-744455d44c-2phng\" (UID: \"665eefbd-7f4c-485c-9822-f4f3cc1a67a5\") " pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.760118 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.787595 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.796002 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.799669 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.806211 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-dqksf" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.821200 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.836509 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-64gk2"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.840673 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.847054 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.859508 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.868540 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.871424 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.889134 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.928493 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.934773 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.940268 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc"] Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.941622 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.961562 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 09:30:32 crc kubenswrapper[4734]: I1125 09:30:32.979537 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.002201 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.020445 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.043226 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.060449 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.079471 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.086980 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-xtsd4"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.098969 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.115878 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-dqksf"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.119710 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.140024 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 
09:30:33 crc kubenswrapper[4734]: W1125 09:30:33.158414 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe35ecf5_83be_4063_9d6a_939bd1a78def.slice/crio-03ae581bc32ea2fedd8b4af997ba7dc1de8889575a261d1c37cc0d7b0e2821c3 WatchSource:0}: Error finding container 03ae581bc32ea2fedd8b4af997ba7dc1de8889575a261d1c37cc0d7b0e2821c3: Status 404 returned error can't find the container with id 03ae581bc32ea2fedd8b4af997ba7dc1de8889575a261d1c37cc0d7b0e2821c3 Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.160079 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.163584 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" event={"ID":"93b2feab-2bfb-4bf2-b63d-b8cd5253509f","Type":"ContainerStarted","Data":"1af81bebee85d3955e4f27b414d368c444761497c638b3bec32dc254002c04fe"} Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.165833 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" event={"ID":"fd882e25-5875-43ed-9d01-34b92fe44587","Type":"ContainerStarted","Data":"4beb5f1cfbf01a7281fa5dc7cf0b08de6a504246fe718e7239776729e42326f3"} Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.171304 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" event={"ID":"30dd5c51-7a6e-4571-b3ae-c33150488286","Type":"ContainerStarted","Data":"1ed058ea063aa7c4596f75ca64daad820541fee4107c8164d035529146bee35c"} Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.176365 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" event={"ID":"221a9776-8306-4dda-b262-3152d9bb212e","Type":"ContainerStarted","Data":"1f7e39098347762c963c0b3640aa87adf6f44359f3c180b77d38ebad9ea24f38"} Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.177869 4734 request.go:700] Waited for 1.010390558s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-multus/secrets?fieldSelector=metadata.name%3Dmultus-admission-controller-secret&limit=500&resourceVersion=0 Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.180677 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.202239 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.210121 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-dcwsq"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.219912 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.246412 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.247857 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.249888 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b78xz"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.261055 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: W1125 09:30:33.289989 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4504e14_e8ec_4fea_acff_0848c20861b0.slice/crio-10c76dc2891e398a678fe64c7fb3f53cc1daff024a1e40a62a7b68934687cb54 WatchSource:0}: Error finding container 10c76dc2891e398a678fe64c7fb3f53cc1daff024a1e40a62a7b68934687cb54: Status 404 returned error can't find the container with id 10c76dc2891e398a678fe64c7fb3f53cc1daff024a1e40a62a7b68934687cb54 Nov 25 09:30:33 crc kubenswrapper[4734]: W1125 09:30:33.293309 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb15a18b7_d1cb_4054_9e79_89e2681747f2.slice/crio-dfce1cc5879b7ccc79aba96bdb9995c70d9559ddb1bb0604479aa3bd6e17f287 WatchSource:0}: Error finding container dfce1cc5879b7ccc79aba96bdb9995c70d9559ddb1bb0604479aa3bd6e17f287: Status 404 returned error can't find the container with id dfce1cc5879b7ccc79aba96bdb9995c70d9559ddb1bb0604479aa3bd6e17f287 Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.299515 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.319838 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.339621 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.358725 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.383957 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.386567 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.398630 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c"] Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.402036 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.419694 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.439835 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.459313 4734 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.480402 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.500359 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.520100 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.540628 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.560258 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.580443 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.599400 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.619319 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.641589 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.659384 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.679585 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.699315 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.718679 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.744189 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.758307 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.780015 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.799874 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.820027 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.861312 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b18276dc-c893-45f6-a36d-37a8ae844715-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pgxfp\" (UID: \"b18276dc-c893-45f6-a36d-37a8ae844715\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.874386 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twgnp\" (UniqueName: \"kubernetes.io/projected/f544d759-f5c6-430f-984e-c0768447ae1d-kube-api-access-twgnp\") pod \"authentication-operator-69f744f599-7tm4l\" (UID: \"f544d759-f5c6-430f-984e-c0768447ae1d\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.896329 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb5xv\" (UniqueName: \"kubernetes.io/projected/3c14528b-b8c0-4b6d-90c8-95904704d096-kube-api-access-nb5xv\") pod \"console-f9d7485db-6m6pm\" (UID: \"3c14528b-b8c0-4b6d-90c8-95904704d096\") " pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.917054 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npwrf\" (UniqueName: \"kubernetes.io/projected/62d3326f-6c05-47b4-852f-00ca08c502d1-kube-api-access-npwrf\") pod \"openshift-controller-manager-operator-756b6f6bc6-vflz8\" (UID: \"62d3326f-6c05-47b4-852f-00ca08c502d1\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.954560 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22spz\" (UniqueName: \"kubernetes.io/projected/f502af72-4499-47f8-adbf-8c1fa6aeddf9-kube-api-access-22spz\") pod \"etcd-operator-b45778765-xzqtm\" (UID: \"f502af72-4499-47f8-adbf-8c1fa6aeddf9\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.960590 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.966230 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6llb\" (UniqueName: \"kubernetes.io/projected/96ac3c5a-8e5f-41f6-9668-ae0ac1f47326-kube-api-access-x6llb\") pod \"catalog-operator-68c6474976-ncw46\" (UID: \"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:33 crc kubenswrapper[4734]: I1125 09:30:33.979115 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.001027 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.021072 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.051834 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.059882 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.062177 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jl2r5\" (UniqueName: \"kubernetes.io/projected/3c1294ee-c0f8-487b-9639-1281c3b687ed-kube-api-access-jl2r5\") pod \"machine-config-controller-84d6567774-9h7bw\" (UID: \"3c1294ee-c0f8-487b-9639-1281c3b687ed\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.091285 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.100028 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.101846 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd6c09ef-f524-4ca9-9104-f506b6784087-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-nbrjt\" (UID: \"bd6c09ef-f524-4ca9-9104-f506b6784087\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.103784 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.119915 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.121142 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.130926 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.135231 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.140377 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.146351 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.161834 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.177933 4734 request.go:700] Waited for 1.90541023s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/configmaps?fieldSelector=metadata.name%3Ddns-default&limit=500&resourceVersion=0 Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.180395 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.181063 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.197476 4734 generic.go:334] "Generic (PLEG): container finished" podID="93b2feab-2bfb-4bf2-b63d-b8cd5253509f" containerID="f4f4a70cf0bd86d49b9d1a288b83644b2bee27314fce5a7c38f9a399e52bd708" exitCode=0 Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.198077 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" event={"ID":"93b2feab-2bfb-4bf2-b63d-b8cd5253509f","Type":"ContainerDied","Data":"f4f4a70cf0bd86d49b9d1a288b83644b2bee27314fce5a7c38f9a399e52bd708"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.200208 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.202568 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" event={"ID":"fd882e25-5875-43ed-9d01-34b92fe44587","Type":"ContainerStarted","Data":"4ca1a8a25d52043eba1d96f1b2e1306a8873e6c6cf2d1733f1bbe22175af50ae"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.202837 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.206106 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" event={"ID":"68456817-e55c-47f3-a4d6-c1f11bac172c","Type":"ContainerStarted","Data":"d2c7e7c1f41d27c974449fd23cbafb8fda8e9c7b7f584e1d83338669414e3519"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.206149 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" event={"ID":"68456817-e55c-47f3-a4d6-c1f11bac172c","Type":"ContainerStarted","Data":"e07d57c270354756261686121eb525cd216ee5daed3c79f599d237df7eedd8e4"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.207131 4734 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-xtsd4 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused" start-of-body= Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.207184 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" 
containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.209814 4734 generic.go:334] "Generic (PLEG): container finished" podID="30dd5c51-7a6e-4571-b3ae-c33150488286" containerID="854ab7a95e078ed11d406ad0dbd62e9a5a6ede9ae8a63613c99e85f8b7a10023" exitCode=0 Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.210272 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" event={"ID":"30dd5c51-7a6e-4571-b3ae-c33150488286","Type":"ContainerDied","Data":"854ab7a95e078ed11d406ad0dbd62e9a5a6ede9ae8a63613c99e85f8b7a10023"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.228045 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.239607 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.244794 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" event={"ID":"b15a18b7-d1cb-4054-9e79-89e2681747f2","Type":"ContainerStarted","Data":"1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.244836 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" event={"ID":"b15a18b7-d1cb-4054-9e79-89e2681747f2","Type":"ContainerStarted","Data":"dfce1cc5879b7ccc79aba96bdb9995c70d9559ddb1bb0604479aa3bd6e17f287"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.254709 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" event={"ID":"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5","Type":"ContainerStarted","Data":"0e5529060a2d20176a132427086bd56d88226bc5108a658721b7e237b560d5f2"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.254773 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" event={"ID":"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5","Type":"ContainerStarted","Data":"9400239d308bfd7170b83fc95580312c245797236767f91692f294164bff942f"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.254787 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" event={"ID":"221a9776-8306-4dda-b262-3152d9bb212e","Type":"ContainerStarted","Data":"b43602282ea6e045469bb66360752a116e856458e4f61225d77e2d6ce2ae202b"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.254805 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" event={"ID":"221a9776-8306-4dda-b262-3152d9bb212e","Type":"ContainerStarted","Data":"4ab341818e314f6d929037ea4ef511f0856121116341b43aa1e28ccd80001e99"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.260398 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.267782 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" event={"ID":"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f","Type":"ContainerStarted","Data":"4a128f0e4f165520d9472656da2e2bca68f65a1ad21fc995f110375a914e51ca"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.267853 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" event={"ID":"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f","Type":"ContainerStarted","Data":"19b12819cc8c1c52619591658951120bb3b1833b3e011f74e5556f7fac89eb86"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.271645 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" event={"ID":"692575a0-8869-436e-b912-838ad302c4bd","Type":"ContainerStarted","Data":"8898d9629b3299cdfcefd18d64e50dba4d4a8146562ac8fac2c3850f5aebcfd8"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.271720 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" event={"ID":"692575a0-8869-436e-b912-838ad302c4bd","Type":"ContainerStarted","Data":"7b0f00604c151f5c036fc20a91cef979034eadaf7ea85ec30dc4ff0b4f3e6267"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.280190 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.298218 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dqksf" event={"ID":"be35ecf5-83be-4063-9d6a-939bd1a78def","Type":"ContainerStarted","Data":"7f1af46ed7ff1aae8264c246a2edf72c96b39f0eaeb36c62503fbae471b7d849"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.298322 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dqksf" event={"ID":"be35ecf5-83be-4063-9d6a-939bd1a78def","Type":"ContainerStarted","Data":"03ae581bc32ea2fedd8b4af997ba7dc1de8889575a261d1c37cc0d7b0e2821c3"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.299045 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-dqksf" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.299264 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.300849 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.300906 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.301536 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" event={"ID":"b4504e14-e8ec-4fea-acff-0848c20861b0","Type":"ContainerStarted","Data":"be5f669b3f9f6c2f98d367635c875de0c8a33299e2a7172710f729181d76661a"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 
09:30:34.301591 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" event={"ID":"b4504e14-e8ec-4fea-acff-0848c20861b0","Type":"ContainerStarted","Data":"10c76dc2891e398a678fe64c7fb3f53cc1daff024a1e40a62a7b68934687cb54"} Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.301788 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.303053 4734 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-b78xz container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.303171 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.321681 4734 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.339461 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-7tm4l"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.340540 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.405036 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgsvv\" (UniqueName: \"kubernetes.io/projected/e42c2e28-78c7-400d-97f3-ba7ac7336c1d-kube-api-access-lgsvv\") pod \"packageserver-d55dfcdfc-59nlg\" (UID: \"e42c2e28-78c7-400d-97f3-ba7ac7336c1d\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.416558 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff5kj\" (UniqueName: \"kubernetes.io/projected/665eefbd-7f4c-485c-9822-f4f3cc1a67a5-kube-api-access-ff5kj\") pod \"dns-operator-744455d44c-2phng\" (UID: \"665eefbd-7f4c-485c-9822-f4f3cc1a67a5\") " pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.416581 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dpq2\" (UniqueName: \"kubernetes.io/projected/94b4f653-06ce-483a-9da8-9cf32064525e-kube-api-access-7dpq2\") pod \"control-plane-machine-set-operator-78cbb6b69f-nr7rm\" (UID: \"94b4f653-06ce-483a-9da8-9cf32064525e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:34 crc kubenswrapper[4734]: W1125 09:30:34.460233 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf544d759_f5c6_430f_984e_c0768447ae1d.slice/crio-9689e40e631f2cfb752199fc5e531ea7d594ee0eb7b5971455ea3237044d9174 WatchSource:0}: Error finding container 9689e40e631f2cfb752199fc5e531ea7d594ee0eb7b5971455ea3237044d9174: Status 404 returned 
error can't find the container with id 9689e40e631f2cfb752199fc5e531ea7d594ee0eb7b5971455ea3237044d9174 Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467028 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467355 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-ca-trust-extracted\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467390 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/396d5204-fafd-4cad-b618-bf4642993d64-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467457 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/396d5204-fafd-4cad-b618-bf4642993d64-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467508 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm4pb\" (UniqueName: \"kubernetes.io/projected/396d5204-fafd-4cad-b618-bf4642993d64-kube-api-access-zm4pb\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467600 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f98607ff-98e0-4c9b-836a-80db389b4529-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-67tg6\" (UID: \"f98607ff-98e0-4c9b-836a-80db389b4529\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467761 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-installation-pull-secrets\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467789 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8cd3ca49-4666-4bb9-a7de-10127d4cea72-metrics-tls\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 
09:30:34.467813 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm5vf\" (UniqueName: \"kubernetes.io/projected/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-kube-api-access-sm5vf\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467880 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2eae88c3-e1c2-4990-9db0-50995f7d2696-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jbtkt\" (UID: \"2eae88c3-e1c2-4990-9db0-50995f7d2696\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467913 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467961 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf854d0-c203-4a18-8c15-67d75a1928da-config\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.467983 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2mx9\" (UniqueName: \"kubernetes.io/projected/2eae88c3-e1c2-4990-9db0-50995f7d2696-kube-api-access-k2mx9\") pod \"package-server-manager-789f6589d5-jbtkt\" (UID: \"2eae88c3-e1c2-4990-9db0-50995f7d2696\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468005 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468040 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468123 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-bound-sa-token\") 
pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468247 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8cd3ca49-4666-4bb9-a7de-10127d4cea72-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468297 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjvct\" (UniqueName: \"kubernetes.io/projected/8cd3ca49-4666-4bb9-a7de-10127d4cea72-kube-api-access-bjvct\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468319 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhm6p\" (UniqueName: \"kubernetes.io/projected/f98607ff-98e0-4c9b-836a-80db389b4529-kube-api-access-nhm6p\") pod \"multus-admission-controller-857f4d67dd-67tg6\" (UID: \"f98607ff-98e0-4c9b-836a-80db389b4529\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468357 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wst2q\" (UniqueName: \"kubernetes.io/projected/a07365e2-39f2-47a6-b55d-38999cba5ea3-kube-api-access-wst2q\") pod \"migrator-59844c95c7-ddc5m\" (UID: \"a07365e2-39f2-47a6-b55d-38999cba5ea3\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468379 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8cd3ca49-4666-4bb9-a7de-10127d4cea72-trusted-ca\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468442 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-tls\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468518 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-trusted-ca\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468545 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pjsj\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-kube-api-access-5pjsj\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: 
\"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468563 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5q5q\" (UniqueName: \"kubernetes.io/projected/4cf854d0-c203-4a18-8c15-67d75a1928da-kube-api-access-l5q5q\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468726 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cf854d0-c203-4a18-8c15-67d75a1928da-trusted-ca\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468798 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-certificates\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.468840 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf854d0-c203-4a18-8c15-67d75a1928da-serving-cert\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.469800 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/396d5204-fafd-4cad-b618-bf4642993d64-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.473556 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.479128 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:34.979064677 +0000 UTC m=+157.789526671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.483791 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-2phng" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.571224 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572021 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8cd3ca49-4666-4bb9-a7de-10127d4cea72-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572069 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/efcf38db-26a3-43b0-95d3-60578dd458e8-metrics-tls\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572186 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0eecaac3-4db7-4d35-8387-31050571269f-signing-key\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572221 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhm6p\" (UniqueName: \"kubernetes.io/projected/f98607ff-98e0-4c9b-836a-80db389b4529-kube-api-access-nhm6p\") pod \"multus-admission-controller-857f4d67dd-67tg6\" (UID: \"f98607ff-98e0-4c9b-836a-80db389b4529\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572250 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61b7ec5c-9240-4232-b303-6a4978e53beb-secret-volume\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572280 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjvct\" (UniqueName: \"kubernetes.io/projected/8cd3ca49-4666-4bb9-a7de-10127d4cea72-kube-api-access-bjvct\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572311 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572336 4734 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wst2q\" (UniqueName: \"kubernetes.io/projected/a07365e2-39f2-47a6-b55d-38999cba5ea3-kube-api-access-wst2q\") pod \"migrator-59844c95c7-ddc5m\" (UID: \"a07365e2-39f2-47a6-b55d-38999cba5ea3\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572361 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572407 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8cd3ca49-4666-4bb9-a7de-10127d4cea72-trusted-ca\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572434 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-844wr\" (UniqueName: \"kubernetes.io/projected/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-kube-api-access-844wr\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572469 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9gfl\" (UniqueName: \"kubernetes.io/projected/0eecaac3-4db7-4d35-8387-31050571269f-kube-api-access-f9gfl\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572499 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl99z\" (UniqueName: \"kubernetes.io/projected/f3b07318-384d-4be4-9784-27312c2429dc-kube-api-access-pl99z\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572530 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-registration-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.572576 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0eecaac3-4db7-4d35-8387-31050571269f-signing-cabundle\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.573007 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/f3b07318-384d-4be4-9784-27312c2429dc-proxy-tls\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.573129 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-tls\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.573154 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-serving-cert\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.573178 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vq4k\" (UniqueName: \"kubernetes.io/projected/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-kube-api-access-9vq4k\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.576276 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.076237892 +0000 UTC m=+157.886699876 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.576525 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8cd3ca49-4666-4bb9-a7de-10127d4cea72-trusted-ca\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.576667 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-config\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.577335 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ac31b82-a8a5-41d0-8588-828d292d6521-config\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.577390 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d9e9d0ee-441c-46f3-9913-db2719215839-node-bootstrap-token\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.577434 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-trusted-ca\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.577478 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-metrics-certs\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.577651 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pjsj\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-kube-api-access-5pjsj\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.577687 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-l5q5q\" (UniqueName: \"kubernetes.io/projected/4cf854d0-c203-4a18-8c15-67d75a1928da-kube-api-access-l5q5q\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.578739 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwmf7\" (UniqueName: \"kubernetes.io/projected/61b7ec5c-9240-4232-b303-6a4978e53beb-kube-api-access-jwmf7\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579074 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/efcf38db-26a3-43b0-95d3-60578dd458e8-config-volume\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579144 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f3b07318-384d-4be4-9784-27312c2429dc-images\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579188 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cf854d0-c203-4a18-8c15-67d75a1928da-trusted-ca\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579213 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-certificates\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579236 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8scbt\" (UniqueName: \"kubernetes.io/projected/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-kube-api-access-8scbt\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579275 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf854d0-c203-4a18-8c15-67d75a1928da-serving-cert\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579296 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/396d5204-fafd-4cad-b618-bf4642993d64-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" 
(UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579314 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-default-certificate\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579331 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-ca-trust-extracted\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579333 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-trusted-ca\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579350 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/396d5204-fafd-4cad-b618-bf4642993d64-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579369 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm4xv\" (UniqueName: \"kubernetes.io/projected/2af950d7-5574-421c-a368-2f4a74be9344-kube-api-access-fm4xv\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579406 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f-cert\") pod \"ingress-canary-mr42k\" (UID: \"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f\") " pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579427 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/396d5204-fafd-4cad-b618-bf4642993d64-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579455 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ac31b82-a8a5-41d0-8588-828d292d6521-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579476 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm4pb\" (UniqueName: \"kubernetes.io/projected/396d5204-fafd-4cad-b618-bf4642993d64-kube-api-access-zm4pb\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579527 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f3b07318-384d-4be4-9784-27312c2429dc-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579557 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d9e9d0ee-441c-46f3-9913-db2719215839-certs\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579584 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f98607ff-98e0-4c9b-836a-80db389b4529-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-67tg6\" (UID: \"f98607ff-98e0-4c9b-836a-80db389b4529\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579633 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7twk8\" (UniqueName: \"kubernetes.io/projected/f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f-kube-api-access-7twk8\") pod \"ingress-canary-mr42k\" (UID: \"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f\") " pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579653 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-service-ca-bundle\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579714 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl8lh\" (UniqueName: \"kubernetes.io/projected/d9e9d0ee-441c-46f3-9913-db2719215839-kube-api-access-pl8lh\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579748 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-stats-auth\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc 
kubenswrapper[4734]: I1125 09:30:34.579779 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-installation-pull-secrets\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579801 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8cd3ca49-4666-4bb9-a7de-10127d4cea72-metrics-tls\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579822 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm5vf\" (UniqueName: \"kubernetes.io/projected/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-kube-api-access-sm5vf\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579846 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2af950d7-5574-421c-a368-2f4a74be9344-profile-collector-cert\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579865 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzpfh\" (UniqueName: \"kubernetes.io/projected/fb8016be-00f4-4c15-b17c-463a385be317-kube-api-access-rzpfh\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579885 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61b7ec5c-9240-4232-b303-6a4978e53beb-config-volume\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579906 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-mountpoint-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579949 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2eae88c3-e1c2-4990-9db0-50995f7d2696-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jbtkt\" (UID: \"2eae88c3-e1c2-4990-9db0-50995f7d2696\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:34 
crc kubenswrapper[4734]: I1125 09:30:34.579979 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.579999 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.580021 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf854d0-c203-4a18-8c15-67d75a1928da-config\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.580040 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2mx9\" (UniqueName: \"kubernetes.io/projected/2eae88c3-e1c2-4990-9db0-50995f7d2696-kube-api-access-k2mx9\") pod \"package-server-manager-789f6589d5-jbtkt\" (UID: \"2eae88c3-e1c2-4990-9db0-50995f7d2696\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.580058 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.580735 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4cf854d0-c203-4a18-8c15-67d75a1928da-trusted-ca\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584234 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-socket-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584285 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-bound-sa-token\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584323 4734 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2af950d7-5574-421c-a368-2f4a74be9344-srv-cert\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584365 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ac31b82-a8a5-41d0-8588-828d292d6521-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584385 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-plugins-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584416 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-csi-data-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.584474 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpw9s\" (UniqueName: \"kubernetes.io/projected/efcf38db-26a3-43b0-95d3-60578dd458e8-kube-api-access-fpw9s\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.586929 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-ca-trust-extracted\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.587342 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/396d5204-fafd-4cad-b618-bf4642993d64-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.587944 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.087923858 +0000 UTC m=+157.898385852 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.589401 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.589570 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/2eae88c3-e1c2-4990-9db0-50995f7d2696-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-jbtkt\" (UID: \"2eae88c3-e1c2-4990-9db0-50995f7d2696\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.589996 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4cf854d0-c203-4a18-8c15-67d75a1928da-config\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.590000 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-certificates\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.590833 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-installation-pull-secrets\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.591743 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/f98607ff-98e0-4c9b-836a-80db389b4529-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-67tg6\" (UID: \"f98607ff-98e0-4c9b-836a-80db389b4529\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.592369 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/396d5204-fafd-4cad-b618-bf4642993d64-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.592375 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8cd3ca49-4666-4bb9-a7de-10127d4cea72-metrics-tls\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.597939 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-tls\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.600957 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8cd3ca49-4666-4bb9-a7de-10127d4cea72-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.602446 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.609069 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf854d0-c203-4a18-8c15-67d75a1928da-serving-cert\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.633370 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhm6p\" (UniqueName: \"kubernetes.io/projected/f98607ff-98e0-4c9b-836a-80db389b4529-kube-api-access-nhm6p\") pod \"multus-admission-controller-857f4d67dd-67tg6\" (UID: \"f98607ff-98e0-4c9b-836a-80db389b4529\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.653309 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjvct\" (UniqueName: \"kubernetes.io/projected/8cd3ca49-4666-4bb9-a7de-10127d4cea72-kube-api-access-bjvct\") pod \"ingress-operator-5b745b69d9-ktlr9\" (UID: \"8cd3ca49-4666-4bb9-a7de-10127d4cea72\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.671848 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wst2q\" (UniqueName: \"kubernetes.io/projected/a07365e2-39f2-47a6-b55d-38999cba5ea3-kube-api-access-wst2q\") pod \"migrator-59844c95c7-ddc5m\" (UID: \"a07365e2-39f2-47a6-b55d-38999cba5ea3\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.681769 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 
09:30:34.681820 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-6m6pm"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.685717 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686337 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-config\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686412 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ac31b82-a8a5-41d0-8588-828d292d6521-config\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686443 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d9e9d0ee-441c-46f3-9913-db2719215839-node-bootstrap-token\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686471 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-metrics-certs\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686527 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwmf7\" (UniqueName: \"kubernetes.io/projected/61b7ec5c-9240-4232-b303-6a4978e53beb-kube-api-access-jwmf7\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686786 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/efcf38db-26a3-43b0-95d3-60578dd458e8-config-volume\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686822 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f3b07318-384d-4be4-9784-27312c2429dc-images\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686855 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-8scbt\" (UniqueName: \"kubernetes.io/projected/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-kube-api-access-8scbt\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686885 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-default-certificate\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686920 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm4xv\" (UniqueName: \"kubernetes.io/projected/2af950d7-5574-421c-a368-2f4a74be9344-kube-api-access-fm4xv\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686959 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f-cert\") pod \"ingress-canary-mr42k\" (UID: \"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f\") " pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.686986 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ac31b82-a8a5-41d0-8588-828d292d6521-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687026 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d9e9d0ee-441c-46f3-9913-db2719215839-certs\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687051 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f3b07318-384d-4be4-9784-27312c2429dc-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687118 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-service-ca-bundle\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687149 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7twk8\" (UniqueName: \"kubernetes.io/projected/f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f-kube-api-access-7twk8\") pod \"ingress-canary-mr42k\" (UID: \"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f\") " pod="openshift-ingress-canary/ingress-canary-mr42k" 
Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687181 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl8lh\" (UniqueName: \"kubernetes.io/projected/d9e9d0ee-441c-46f3-9913-db2719215839-kube-api-access-pl8lh\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687206 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-stats-auth\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687252 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2af950d7-5574-421c-a368-2f4a74be9344-profile-collector-cert\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687279 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzpfh\" (UniqueName: \"kubernetes.io/projected/fb8016be-00f4-4c15-b17c-463a385be317-kube-api-access-rzpfh\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687306 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-mountpoint-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687337 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61b7ec5c-9240-4232-b303-6a4978e53beb-config-volume\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687383 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-socket-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687414 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ac31b82-a8a5-41d0-8588-828d292d6521-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687439 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-plugins-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687465 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2af950d7-5574-421c-a368-2f4a74be9344-srv-cert\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687489 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-csi-data-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687524 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpw9s\" (UniqueName: \"kubernetes.io/projected/efcf38db-26a3-43b0-95d3-60578dd458e8-kube-api-access-fpw9s\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687546 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/efcf38db-26a3-43b0-95d3-60578dd458e8-metrics-tls\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687575 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0eecaac3-4db7-4d35-8387-31050571269f-signing-key\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687599 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61b7ec5c-9240-4232-b303-6a4978e53beb-secret-volume\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687626 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687651 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687679 4734 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-844wr\" (UniqueName: \"kubernetes.io/projected/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-kube-api-access-844wr\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687704 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9gfl\" (UniqueName: \"kubernetes.io/projected/0eecaac3-4db7-4d35-8387-31050571269f-kube-api-access-f9gfl\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687731 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl99z\" (UniqueName: \"kubernetes.io/projected/f3b07318-384d-4be4-9784-27312c2429dc-kube-api-access-pl99z\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687760 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-registration-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687783 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0eecaac3-4db7-4d35-8387-31050571269f-signing-cabundle\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687806 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f3b07318-384d-4be4-9784-27312c2429dc-proxy-tls\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687832 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-serving-cert\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.687856 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vq4k\" (UniqueName: \"kubernetes.io/projected/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-kube-api-access-9vq4k\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.689011 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pjsj\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-kube-api-access-5pjsj\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: 
\"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.689069 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/efcf38db-26a3-43b0-95d3-60578dd458e8-config-volume\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.689336 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-socket-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.689803 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-plugins-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.690964 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/f3b07318-384d-4be4-9784-27312c2429dc-images\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.691677 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-config\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.691759 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.19174133 +0000 UTC m=+158.002203324 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.692984 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ac31b82-a8a5-41d0-8588-828d292d6521-config\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.695469 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2af950d7-5574-421c-a368-2f4a74be9344-srv-cert\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.695825 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-registration-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.696746 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0eecaac3-4db7-4d35-8387-31050571269f-signing-cabundle\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.700237 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.701012 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.706042 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f3b07318-384d-4be4-9784-27312c2429dc-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.706532 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: 
\"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-csi-data-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.707273 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-service-ca-bundle\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.708043 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-mountpoint-dir\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.709101 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61b7ec5c-9240-4232-b303-6a4978e53beb-config-volume\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.711977 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-metrics-certs\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.712033 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0eecaac3-4db7-4d35-8387-31050571269f-signing-key\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.712184 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2af950d7-5574-421c-a368-2f4a74be9344-profile-collector-cert\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.712234 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5q5q\" (UniqueName: \"kubernetes.io/projected/4cf854d0-c203-4a18-8c15-67d75a1928da-kube-api-access-l5q5q\") pod \"console-operator-58897d9998-n7f6b\" (UID: \"4cf854d0-c203-4a18-8c15-67d75a1928da\") " pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.715008 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f-cert\") pod \"ingress-canary-mr42k\" (UID: \"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f\") " pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.715392 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/efcf38db-26a3-43b0-95d3-60578dd458e8-metrics-tls\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.715869 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/d9e9d0ee-441c-46f3-9913-db2719215839-certs\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.717297 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f3b07318-384d-4be4-9784-27312c2429dc-proxy-tls\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.718316 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-stats-auth\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.718799 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-default-certificate\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.718999 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-serving-cert\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.719909 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ac31b82-a8a5-41d0-8588-828d292d6521-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.722684 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61b7ec5c-9240-4232-b303-6a4978e53beb-secret-volume\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.727693 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/d9e9d0ee-441c-46f3-9913-db2719215839-node-bootstrap-token\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.735020 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-sm5vf\" (UniqueName: \"kubernetes.io/projected/90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0-kube-api-access-sm5vf\") pod \"kube-storage-version-migrator-operator-b67b599dd-c48tj\" (UID: \"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.760546 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/396d5204-fafd-4cad-b618-bf4642993d64-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.776870 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm4pb\" (UniqueName: \"kubernetes.io/projected/396d5204-fafd-4cad-b618-bf4642993d64-kube-api-access-zm4pb\") pod \"cluster-image-registry-operator-dc59b4c8b-pnvcs\" (UID: \"396d5204-fafd-4cad-b618-bf4642993d64\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.791487 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.792324 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.292302526 +0000 UTC m=+158.102764520 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.792585 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.798467 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.806017 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-bound-sa-token\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.806325 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.814700 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.815542 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.830408 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2mx9\" (UniqueName: \"kubernetes.io/projected/2eae88c3-e1c2-4990-9db0-50995f7d2696-kube-api-access-k2mx9\") pod \"package-server-manager-789f6589d5-jbtkt\" (UID: \"2eae88c3-e1c2-4990-9db0-50995f7d2696\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.830771 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.846119 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.849933 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.860851 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vq4k\" (UniqueName: \"kubernetes.io/projected/f0bde9ec-13b8-4565-b5f4-d2b2a308ab03-kube-api-access-9vq4k\") pod \"service-ca-operator-777779d784-86g8w\" (UID: \"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.870565 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.877078 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ac31b82-a8a5-41d0-8588-828d292d6521-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rmm42\" (UID: \"6ac31b82-a8a5-41d0-8588-828d292d6521\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.878494 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.887514 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.890683 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.893600 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.893726 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-2phng"] Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.894045 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.394027676 +0000 UTC m=+158.204489670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.897165 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwmf7\" (UniqueName: \"kubernetes.io/projected/61b7ec5c-9240-4232-b303-6a4978e53beb-kube-api-access-jwmf7\") pod \"collect-profiles-29401050-4d6wn\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.903684 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.906043 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8scbt\" (UniqueName: \"kubernetes.io/projected/c673b63c-b34e-41b2-ad5f-c7b258ac9b26-kube-api-access-8scbt\") pod \"router-default-5444994796-pc9l6\" (UID: \"c673b63c-b34e-41b2-ad5f-c7b258ac9b26\") " pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.919043 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7twk8\" (UniqueName: \"kubernetes.io/projected/f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f-kube-api-access-7twk8\") pod \"ingress-canary-mr42k\" (UID: \"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f\") " pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.925709 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-mr42k" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.935555 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm4xv\" (UniqueName: \"kubernetes.io/projected/2af950d7-5574-421c-a368-2f4a74be9344-kube-api-access-fm4xv\") pod \"olm-operator-6b444d44fb-bmmjg\" (UID: \"2af950d7-5574-421c-a368-2f4a74be9344\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.985755 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-844wr\" (UniqueName: \"kubernetes.io/projected/a3eedf9c-efe3-4ef4-bb51-5a0a345733f6-kube-api-access-844wr\") pod \"csi-hostpathplugin-dc5dm\" (UID: \"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6\") " pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.986971 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9gfl\" (UniqueName: \"kubernetes.io/projected/0eecaac3-4db7-4d35-8387-31050571269f-kube-api-access-f9gfl\") pod \"service-ca-9c57cc56f-hf5p7\" (UID: \"0eecaac3-4db7-4d35-8387-31050571269f\") " pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.987059 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.987591 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.993570 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xzqtm"] Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.997232 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:34 crc kubenswrapper[4734]: E1125 09:30:34.997666 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.497647912 +0000 UTC m=+158.308109906 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:34 crc kubenswrapper[4734]: I1125 09:30:34.999356 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl99z\" (UniqueName: \"kubernetes.io/projected/f3b07318-384d-4be4-9784-27312c2429dc-kube-api-access-pl99z\") pod \"machine-config-operator-74547568cd-b9pbx\" (UID: \"f3b07318-384d-4be4-9784-27312c2429dc\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.019580 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpw9s\" (UniqueName: \"kubernetes.io/projected/efcf38db-26a3-43b0-95d3-60578dd458e8-kube-api-access-fpw9s\") pod \"dns-default-m4rhm\" (UID: \"efcf38db-26a3-43b0-95d3-60578dd458e8\") " pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.046449 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-n7f6b"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.046770 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzpfh\" (UniqueName: \"kubernetes.io/projected/fb8016be-00f4-4c15-b17c-463a385be317-kube-api-access-rzpfh\") pod \"marketplace-operator-79b997595-4qpf8\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.054856 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.066051 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl8lh\" (UniqueName: \"kubernetes.io/projected/d9e9d0ee-441c-46f3-9913-db2719215839-kube-api-access-pl8lh\") pod \"machine-config-server-kh9hk\" (UID: \"d9e9d0ee-441c-46f3-9913-db2719215839\") " pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:35 crc kubenswrapper[4734]: W1125 09:30:35.081552 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf502af72_4499_47f8_adbf_8c1fa6aeddf9.slice/crio-b799da879060041a9e4f7fe5283d49c68f9467a53198efdf27b11fa13f1f91d5 WatchSource:0}: Error finding container b799da879060041a9e4f7fe5283d49c68f9467a53198efdf27b11fa13f1f91d5: Status 404 returned error can't find the container with id b799da879060041a9e4f7fe5283d49c68f9467a53198efdf27b11fa13f1f91d5 Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.098924 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.099401 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.599382952 +0000 UTC m=+158.409844946 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: W1125 09:30:35.106548 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cf854d0_c203_4a18_8c15_67d75a1928da.slice/crio-95e20d09e250205cc44b174515e6478d58664d0a56043e289d55786aabb537bc WatchSource:0}: Error finding container 95e20d09e250205cc44b174515e6478d58664d0a56043e289d55786aabb537bc: Status 404 returned error can't find the container with id 95e20d09e250205cc44b174515e6478d58664d0a56043e289d55786aabb537bc Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.124970 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.147680 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.158782 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.175562 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.176546 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.202616 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.203182 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.703161363 +0000 UTC m=+158.513623357 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.211870 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.238056 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-m4rhm" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.248153 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-kh9hk" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.258536 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.276048 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.305429 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.305690 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.805653196 +0000 UTC m=+158.616115190 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.305759 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.306128 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.806112229 +0000 UTC m=+158.616574223 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.330191 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" event={"ID":"f544d759-f5c6-430f-984e-c0768447ae1d","Type":"ContainerStarted","Data":"290b7b2549bd31fc9d4cdcb87898fd00c00a25aa7009de0f98abdb88adfc547a"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.330238 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" event={"ID":"f544d759-f5c6-430f-984e-c0768447ae1d","Type":"ContainerStarted","Data":"9689e40e631f2cfb752199fc5e531ea7d594ee0eb7b5971455ea3237044d9174"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.338921 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" event={"ID":"3c1294ee-c0f8-487b-9639-1281c3b687ed","Type":"ContainerStarted","Data":"592b732504ed307c0db0c126ff7311142015bb886eaa689cc2c0416236e3cc5e"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.362811 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" event={"ID":"62d3326f-6c05-47b4-852f-00ca08c502d1","Type":"ContainerStarted","Data":"34d62bd2ce31e5251147ebba12176d83d31efed706bb44209e88c6c2f16a6d16"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.369543 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" event={"ID":"68456817-e55c-47f3-a4d6-c1f11bac172c","Type":"ContainerStarted","Data":"51c5e9e16da040a4060b215d068cf58b5097d67a3ef51fa76048497389df6ce0"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 
09:30:35.373340 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" event={"ID":"bd6c09ef-f524-4ca9-9104-f506b6784087","Type":"ContainerStarted","Data":"042c03809c6a7e91eff8f86201323a377822a9fbf2c083fd7239f59a0f4eaaa5"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.375987 4734 generic.go:334] "Generic (PLEG): container finished" podID="1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f" containerID="4a128f0e4f165520d9472656da2e2bca68f65a1ad21fc995f110375a914e51ca" exitCode=0 Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.376054 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" event={"ID":"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f","Type":"ContainerDied","Data":"4a128f0e4f165520d9472656da2e2bca68f65a1ad21fc995f110375a914e51ca"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.381368 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" event={"ID":"30dd5c51-7a6e-4571-b3ae-c33150488286","Type":"ContainerStarted","Data":"aa4bfc7cde3fc19b24f403cf722a63646e00a7554ce4fa2cf5ee4dcd39419d1f"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.412639 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.412959 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:35.912892509 +0000 UTC m=+158.723354513 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.413611 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" event={"ID":"93b2feab-2bfb-4bf2-b63d-b8cd5253509f","Type":"ContainerStarted","Data":"57cc655cb06961baa4295f56f30a1bc5f7e90959bdd7782823bf061a6f57f603"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.413765 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.414782 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:30:35.914763484 +0000 UTC m=+158.725225478 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.439939 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" event={"ID":"e42c2e28-78c7-400d-97f3-ba7ac7336c1d","Type":"ContainerStarted","Data":"9a5bc986faa42b4a3ec51478d882735e4b08da49d2458d823bc38507df74ac14"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.454385 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" event={"ID":"94b4f653-06ce-483a-9da8-9cf32064525e","Type":"ContainerStarted","Data":"e96712f6fd38a4ab6958c27991876fd30e231de2833eb3cb215a639d04ad64ed"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.463594 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-86g8w"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.491492 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-6m6pm" event={"ID":"3c14528b-b8c0-4b6d-90c8-95904704d096","Type":"ContainerStarted","Data":"f803a0e3d58c0880105596adc697853d76e04062f1c4880902831c665ba4e65f"} Nov 25 09:30:35 crc kubenswrapper[4734]: W1125 09:30:35.495483 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod396d5204_fafd_4cad_b618_bf4642993d64.slice/crio-4c5a8b76d7420cabed332211e78541865b5b624e76d36365bf43a1bc8c256463 WatchSource:0}: Error finding container 4c5a8b76d7420cabed332211e78541865b5b624e76d36365bf43a1bc8c256463: Status 404 returned error can't find the container with id 4c5a8b76d7420cabed332211e78541865b5b624e76d36365bf43a1bc8c256463 Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.498992 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" event={"ID":"7e7ee74e-3385-49ba-83bb-c03f4ec7fdb5","Type":"ContainerStarted","Data":"ca044788be6c54ee8fe74bc3b9f0a7e8896746681ffd5361e482ab3cbb1a4252"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.503806 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" event={"ID":"8cd3ca49-4666-4bb9-a7de-10127d4cea72","Type":"ContainerStarted","Data":"57f595a32278e9bfee250a8517c9016342a7378529780e3510419fc30e445869"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.516899 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.517489 4734 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.017450263 +0000 UTC m=+158.827912257 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.518717 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2phng" event={"ID":"665eefbd-7f4c-485c-9822-f4f3cc1a67a5","Type":"ContainerStarted","Data":"f1578bdb7f5da4267f54034837303c5570e4a6cc3ac39e6b1e46db95333d8959"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.526736 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" event={"ID":"f502af72-4499-47f8-adbf-8c1fa6aeddf9","Type":"ContainerStarted","Data":"b799da879060041a9e4f7fe5283d49c68f9467a53198efdf27b11fa13f1f91d5"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.543196 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" event={"ID":"4cf854d0-c203-4a18-8c15-67d75a1928da","Type":"ContainerStarted","Data":"95e20d09e250205cc44b174515e6478d58664d0a56043e289d55786aabb537bc"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.551622 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" event={"ID":"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326","Type":"ContainerStarted","Data":"c345cf7d9e03343586e77b49232fb0624d5057d4e8dafc6e7ab9b9ab5f2447ab"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.558848 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" event={"ID":"b18276dc-c893-45f6-a36d-37a8ae844715","Type":"ContainerStarted","Data":"9b463b47f6d82e8fa83b259bfdbdeca4da32563f208eb61457abe79464f86013"} Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.561845 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.561908 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.562337 4734 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-b78xz container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 
09:30:35.565634 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.567536 4734 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-xtsd4 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused" start-of-body= Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.567642 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.619633 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.625738 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.125595783 +0000 UTC m=+158.936057767 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.654825 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.720903 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.721653 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.221636995 +0000 UTC m=+159.032098989 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.789755 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.811613 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-dqksf" podStartSLOduration=133.811597847 podStartE2EDuration="2m13.811597847s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:35.809770743 +0000 UTC m=+158.620232737" watchObservedRunningTime="2025-11-25 09:30:35.811597847 +0000 UTC m=+158.622059841" Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.814422 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.822750 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.823677 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.323658383 +0000 UTC m=+159.134120377 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.851640 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-mr42k"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.868376 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-67tg6"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.874990 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.875098 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.877095 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42"] Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.923904 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:35 crc kubenswrapper[4734]: E1125 09:30:35.924303 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.424288551 +0000 UTC m=+159.234750545 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:35 crc kubenswrapper[4734]: I1125 09:30:35.931625 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-pxm7c" podStartSLOduration=133.931599167 podStartE2EDuration="2m13.931599167s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:35.912025588 +0000 UTC m=+158.722487582" watchObservedRunningTime="2025-11-25 09:30:35.931599167 +0000 UTC m=+158.742061161" Nov 25 09:30:35 crc kubenswrapper[4734]: W1125 09:30:35.995237 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3b07318_384d_4be4_9784_27312c2429dc.slice/crio-8c57cd06cebd58a2f9b89ac42a095303e9b9140978693ab807c72e4458c1e892 WatchSource:0}: Error finding container 8c57cd06cebd58a2f9b89ac42a095303e9b9140978693ab807c72e4458c1e892: Status 404 returned error can't find the container with id 8c57cd06cebd58a2f9b89ac42a095303e9b9140978693ab807c72e4458c1e892 Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.025436 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.026228 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.526210477 +0000 UTC m=+159.336672471 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.033941 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-nw9pk" podStartSLOduration=134.033910835 podStartE2EDuration="2m14.033910835s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:36.032626447 +0000 UTC m=+158.843088461" watchObservedRunningTime="2025-11-25 09:30:36.033910835 +0000 UTC m=+158.844372829" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.126729 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.127137 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.627113303 +0000 UTC m=+159.437575297 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.157021 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-dcwsq" podStartSLOduration=134.156984766 podStartE2EDuration="2m14.156984766s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:36.154923476 +0000 UTC m=+158.965385480" watchObservedRunningTime="2025-11-25 09:30:36.156984766 +0000 UTC m=+158.967446780"
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.193229 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" podStartSLOduration=133.193199408 podStartE2EDuration="2m13.193199408s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:36.193109895 +0000 UTC m=+159.003571889" watchObservedRunningTime="2025-11-25 09:30:36.193199408 +0000 UTC m=+159.003661402"
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.228579 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.229259 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.729230874 +0000 UTC m=+159.539692878 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.256105 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4qpf8"]
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.317099 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg"]
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.331112 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.331819 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.831745208 +0000 UTC m=+159.642207202 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.333051 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.333652 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.833633244 +0000 UTC m=+159.644095238 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.395233 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-m4rhm"]
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.404787 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-dc5dm"]
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.409043 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hf5p7"]
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.435947 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.436307 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.93624205 +0000 UTC m=+159.746704044 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.436358 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.436861 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:36.936841457 +0000 UTC m=+159.747303451 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: W1125 09:30:36.458070 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3eedf9c_efe3_4ef4_bb51_5a0a345733f6.slice/crio-1132367c90b443dbac03849eac39c36a1ed4c69ce12743c8f358853a9d68f38b WatchSource:0}: Error finding container 1132367c90b443dbac03849eac39c36a1ed4c69ce12743c8f358853a9d68f38b: Status 404 returned error can't find the container with id 1132367c90b443dbac03849eac39c36a1ed4c69ce12743c8f358853a9d68f38b Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.539364 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.540135 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.040109833 +0000 UTC m=+159.850571827 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.583919 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-kh9hk" event={"ID":"d9e9d0ee-441c-46f3-9913-db2719215839","Type":"ContainerStarted","Data":"2f563b8784b107675eba86a236999b9c1af86b0422a98804130ecadf8c38791e"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.588186 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" event={"ID":"6ac31b82-a8a5-41d0-8588-828d292d6521","Type":"ContainerStarted","Data":"8b2295040e6ea7ee33d4eb0677929d17d45c4ea4dc30d7eb3f1fbcc30a38db6d"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.611058 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" event={"ID":"bd6c09ef-f524-4ca9-9104-f506b6784087","Type":"ContainerStarted","Data":"3768541a9bba51af8660e191f337eb81b2b27fbd4f9e197326fab61da7cc60a5"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.626559 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" event={"ID":"2af950d7-5574-421c-a368-2f4a74be9344","Type":"ContainerStarted","Data":"75bd4d1d6f32468fe860f25a2104bf426885c9e470de9c1e199344f5dc972556"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.628571 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-mr42k" event={"ID":"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f","Type":"ContainerStarted","Data":"f99f91489b95fa22d17dbd352965e81683863a42ced97af8defc671706a1c84e"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.630684 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" event={"ID":"8cd3ca49-4666-4bb9-a7de-10127d4cea72","Type":"ContainerStarted","Data":"d91fc0dc9fa78b0aef1705cf2907a654a450df874e927a09dc1382aa09e50cc4"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.631934 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" event={"ID":"b18276dc-c893-45f6-a36d-37a8ae844715","Type":"ContainerStarted","Data":"a1c30d25840680264d0ab3878cc8a8fbc9e6560cb88f88daffd872ebc6800fcc"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.632943 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" event={"ID":"96ac3c5a-8e5f-41f6-9668-ae0ac1f47326","Type":"ContainerStarted","Data":"200a01e86b2c331d7ddd161a530f29ee9f7cf086452cc9aa79b60db39aae1b56"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.634283 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" 
event={"ID":"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03","Type":"ContainerStarted","Data":"902a1972d4c2bf38e307462e4f066f384d3e8dba7818588dd8fa5977b468143a"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.643852 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.644115 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" event={"ID":"4cf854d0-c203-4a18-8c15-67d75a1928da","Type":"ContainerStarted","Data":"fa61e156859ee8553951f033559aa6fecbcf3e5ae6fce6ff872108609fb4b2f3"} Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.644379 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.144360768 +0000 UTC m=+159.954822762 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.645211 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" podStartSLOduration=134.645192202 podStartE2EDuration="2m14.645192202s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:36.597989466 +0000 UTC m=+159.408451460" watchObservedRunningTime="2025-11-25 09:30:36.645192202 +0000 UTC m=+159.455654196" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.649854 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" event={"ID":"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0","Type":"ContainerStarted","Data":"b0060ba1937583f85d44b89a60c96ae1896a505224d550158c9ff3b64b1d8258"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.663795 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-6m6pm" event={"ID":"3c14528b-b8c0-4b6d-90c8-95904704d096","Type":"ContainerStarted","Data":"878e2bddd9a007367c2ad006266fd019cded835ae3122477e6f56a42b9b87f67"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.670142 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" event={"ID":"0eecaac3-4db7-4d35-8387-31050571269f","Type":"ContainerStarted","Data":"331d7d13db06a21f9cbb60b7d59f29dd955b0c2f48dd13a416c50ef91028a3ee"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.671435 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" 
event={"ID":"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6","Type":"ContainerStarted","Data":"1132367c90b443dbac03849eac39c36a1ed4c69ce12743c8f358853a9d68f38b"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.673043 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" podStartSLOduration=134.673019186 podStartE2EDuration="2m14.673019186s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:36.642700039 +0000 UTC m=+159.453162043" watchObservedRunningTime="2025-11-25 09:30:36.673019186 +0000 UTC m=+159.483481190" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.686463 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" event={"ID":"62d3326f-6c05-47b4-852f-00ca08c502d1","Type":"ContainerStarted","Data":"8f5a980568640feb801bca010c007fe0816f28be3cb97ca8e691650a6de53d77"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.691841 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" event={"ID":"2eae88c3-e1c2-4990-9db0-50995f7d2696","Type":"ContainerStarted","Data":"d3cce305f3c2941f34eccc1c1c9d3696325879224e25893630ce8c919f246d99"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.695523 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" event={"ID":"e42c2e28-78c7-400d-97f3-ba7ac7336c1d","Type":"ContainerStarted","Data":"953c7e4dd84f4bbea3469d9b6091985b28d3020af39d6a9985638b4210089c7e"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.695938 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.697241 4734 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-59nlg container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" start-of-body= Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.697281 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" podUID="e42c2e28-78c7-400d-97f3-ba7ac7336c1d" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.702948 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2phng" event={"ID":"665eefbd-7f4c-485c-9822-f4f3cc1a67a5","Type":"ContainerStarted","Data":"c9dcfb1ddc7b7039b551dd5368ffc8fe4dec4130dd4334f62f56cd703789846f"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.722698 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" event={"ID":"3c1294ee-c0f8-487b-9639-1281c3b687ed","Type":"ContainerStarted","Data":"6ff0b851ee8f52e67cf03e5ff780b07623ae7861f05790e154c806dc1c794c16"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.726036 4734 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" event={"ID":"61b7ec5c-9240-4232-b303-6a4978e53beb","Type":"ContainerStarted","Data":"765fcd98933f5dd2ff6db6c7ddbdaa432736a4446080e4280663973d543c2485"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.754283 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.754697 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.254667542 +0000 UTC m=+160.065129536 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.754849 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.755383 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.255361683 +0000 UTC m=+160.065823837 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.763578 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-6mckc" podStartSLOduration=134.76340462 podStartE2EDuration="2m14.76340462s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:36.754811856 +0000 UTC m=+159.565273870" watchObservedRunningTime="2025-11-25 09:30:36.76340462 +0000 UTC m=+159.573866624" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.772762 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" event={"ID":"a07365e2-39f2-47a6-b55d-38999cba5ea3","Type":"ContainerStarted","Data":"b2d7d96647e66b46fd3c5212c5bf695df968b59a125d4511b6577ecf9dd95a30"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.796565 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" event={"ID":"396d5204-fafd-4cad-b618-bf4642993d64","Type":"ContainerStarted","Data":"69a99881495417256b4761a8c117cf6a6b695c54b884ba65aa013986979030a3"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.796656 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" event={"ID":"396d5204-fafd-4cad-b618-bf4642993d64","Type":"ContainerStarted","Data":"4c5a8b76d7420cabed332211e78541865b5b624e76d36365bf43a1bc8c256463"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.799202 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" event={"ID":"fb8016be-00f4-4c15-b17c-463a385be317","Type":"ContainerStarted","Data":"bf532dda9d29436337938b085108210b06a9478c859ec9f9deb374eee130b82a"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.815704 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" event={"ID":"f3b07318-384d-4be4-9784-27312c2429dc","Type":"ContainerStarted","Data":"8c57cd06cebd58a2f9b89ac42a095303e9b9140978693ab807c72e4458c1e892"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.825358 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" event={"ID":"94b4f653-06ce-483a-9da8-9cf32064525e","Type":"ContainerStarted","Data":"6a8cd55489bf9ab7bf63627c72fb7e836fdb36e1fbf53127a878edfaa8b207da"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.835238 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-pc9l6" event={"ID":"c673b63c-b34e-41b2-ad5f-c7b258ac9b26","Type":"ContainerStarted","Data":"fe57fc67187ce3aca9b31fd9467151fab59d57a05b1dc054abfc47e141ba9392"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.837245 4734 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" event={"ID":"f98607ff-98e0-4c9b-836a-80db389b4529","Type":"ContainerStarted","Data":"b4c67396ec2244ab7533f6199ad5528bcd7fd63b7c2df4806ddb31be36554781"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.838426 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-m4rhm" event={"ID":"efcf38db-26a3-43b0-95d3-60578dd458e8","Type":"ContainerStarted","Data":"5037c4763204b7926001103843f9ffb0dba0d78a21059e3e92c65db8d6c26d7f"} Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.840214 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.845362 4734 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-46nzm container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.845451 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" podUID="b15a18b7-d1cb-4054-9e79-89e2681747f2" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.860646 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.862451 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.36239889 +0000 UTC m=+160.172860884 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:36 crc kubenswrapper[4734]: I1125 09:30:36.965369 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:36 crc kubenswrapper[4734]: E1125 09:30:36.972937 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:30:37.47291655 +0000 UTC m=+160.283378544 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.038490 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" podStartSLOduration=134.03846812 podStartE2EDuration="2m14.03846812s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.036964045 +0000 UTC m=+159.847426039" watchObservedRunningTime="2025-11-25 09:30:37.03846812 +0000 UTC m=+159.848930114" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.040800 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-6m6pm" podStartSLOduration=135.040781358 podStartE2EDuration="2m15.040781358s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.008245715 +0000 UTC m=+159.818707709" watchObservedRunningTime="2025-11-25 09:30:37.040781358 +0000 UTC m=+159.851243352" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.066436 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.070266 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.57024227 +0000 UTC m=+160.380704254 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.071924 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.071993 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-7tm4l" podStartSLOduration=135.071979261 podStartE2EDuration="2m15.071979261s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.069977402 +0000 UTC m=+159.880439396" watchObservedRunningTime="2025-11-25 09:30:37.071979261 +0000 UTC m=+159.882441255" Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.072406 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.572391923 +0000 UTC m=+160.382853917 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.131451 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" podStartSLOduration=134.13142738 podStartE2EDuration="2m14.13142738s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.131190673 +0000 UTC m=+159.941652677" watchObservedRunningTime="2025-11-25 09:30:37.13142738 +0000 UTC m=+159.941889364" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.173406 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.174408 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.674377081 +0000 UTC m=+160.484839075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.279922 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.280286 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.780274495 +0000 UTC m=+160.590736489 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.387427 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.388655 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.88863515 +0000 UTC m=+160.699097144 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.389266 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.389790 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.889776994 +0000 UTC m=+160.700238988 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.495066 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.495819 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.995777 +0000 UTC m=+160.806238994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.496223 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.496930 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:37.996890163 +0000 UTC m=+160.807352157 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.597794 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.598348 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.098328125 +0000 UTC m=+160.908790119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.611420 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.611897 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.615668 4734 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-5jtcs container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.615723 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" podUID="30dd5c51-7a6e-4571-b3ae-c33150488286" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.701213 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.701623 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:30:38.201604961 +0000 UTC m=+161.012066955 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.802913 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.803944 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.303927898 +0000 UTC m=+161.114389892 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.852670 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" event={"ID":"1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f","Type":"ContainerStarted","Data":"3338ede5ef88f3d0cd7c18d3bb0da509509537d95d923cf691cf88f9199476ca"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.853256 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.855810 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" event={"ID":"6ac31b82-a8a5-41d0-8588-828d292d6521","Type":"ContainerStarted","Data":"73d74278dff97baf335f24996e084464a3806f35b68d2f87c0cd13fe9805b2ec"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.859438 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" event={"ID":"8cd3ca49-4666-4bb9-a7de-10127d4cea72","Type":"ContainerStarted","Data":"e6a20d8b7197cd37b4594f15630780854979962ece6b45e8a94a1b519e7b3787"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.866662 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" event={"ID":"3c1294ee-c0f8-487b-9639-1281c3b687ed","Type":"ContainerStarted","Data":"b27520116076b65364066d172aae032b0fca564304d7bc6770f9e480efee8d73"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.869982 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" event={"ID":"f502af72-4499-47f8-adbf-8c1fa6aeddf9","Type":"ContainerStarted","Data":"7d219badf77362adff76c8994e0a1cfd7fdbfd0c6cb792a5cdcd944f0adcd9d8"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.872494 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" event={"ID":"fb8016be-00f4-4c15-b17c-463a385be317","Type":"ContainerStarted","Data":"bc269dc7d1418fd711a7042d9d522d788c1204720e03766dd3ad056930c7b38e"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.872767 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.875305 4734 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-4qpf8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.875376 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.877764 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-kh9hk" event={"ID":"d9e9d0ee-441c-46f3-9913-db2719215839","Type":"ContainerStarted","Data":"251abb7a742e94fc83f148be1a53d2289d79b5057831cf4c60d99ca4d6d883ed"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.884921 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" event={"ID":"f0bde9ec-13b8-4565-b5f4-d2b2a308ab03","Type":"ContainerStarted","Data":"fcd5499a6f248f54b78f4d5a905b70288ec0cede05bd4e05550ff9fed1caaad3"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.888077 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" event={"ID":"2eae88c3-e1c2-4990-9db0-50995f7d2696","Type":"ContainerStarted","Data":"69e84ed0f5a9078116ac8f56b638d3360ffc078887f9d243a93c659e7f611715"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.888184 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" event={"ID":"2eae88c3-e1c2-4990-9db0-50995f7d2696","Type":"ContainerStarted","Data":"8de0e019644540d19c296672acaeea2f3dd3c16c1582250e5d873dd8b671f582"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.890212 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-mr42k" event={"ID":"f503ad2e-0a8a-4e18-b2bd-7d0680c76b3f","Type":"ContainerStarted","Data":"68fcc3f0458406c2046715246d12fb2a95994b3d815e9868ed7031658c70ff6a"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.893687 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" event={"ID":"0eecaac3-4db7-4d35-8387-31050571269f","Type":"ContainerStarted","Data":"65c632a6763ae180b220c17278da76ab84bf0852bca11f30da8940cb430a0a2c"} Nov 25 09:30:37 crc 
kubenswrapper[4734]: I1125 09:30:37.896773 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" event={"ID":"f3b07318-384d-4be4-9784-27312c2429dc","Type":"ContainerStarted","Data":"1a32a22df5fd8cfe6dbc294089bbe25244934138a4bd1cb155336af0adaebeb6"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.896831 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" event={"ID":"f3b07318-384d-4be4-9784-27312c2429dc","Type":"ContainerStarted","Data":"5de6c0da3e67cb2710de665720eb73ff65878ca125f2751b541003fbfdffe78c"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.898275 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" event={"ID":"f98607ff-98e0-4c9b-836a-80db389b4529","Type":"ContainerStarted","Data":"980e8a87fb533274e2cb071ea80298821870c0a89c1889c8cc1774b9c003cbe1"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.901558 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" podStartSLOduration=135.901532537 podStartE2EDuration="2m15.901532537s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.899483366 +0000 UTC m=+160.709945360" watchObservedRunningTime="2025-11-25 09:30:37.901532537 +0000 UTC m=+160.711994531" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.903586 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-m4rhm" event={"ID":"efcf38db-26a3-43b0-95d3-60578dd458e8","Type":"ContainerStarted","Data":"2e093c8dfb39d1c1b95a761348396d0bd59060c6785a7132f290880182957975"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.904924 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:37 crc kubenswrapper[4734]: E1125 09:30:37.905399 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.40538199 +0000 UTC m=+161.215843984 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.906695 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-pc9l6" event={"ID":"c673b63c-b34e-41b2-ad5f-c7b258ac9b26","Type":"ContainerStarted","Data":"2df23ac8e17944bb407cacff6ab33fda76ee3cda403cd7c7692df23d78bceb29"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.909946 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" event={"ID":"2af950d7-5574-421c-a368-2f4a74be9344","Type":"ContainerStarted","Data":"05cb65d499cc1bfebfe8e5ae47bc2c55aca08588c966ee64f7dc09785538a866"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.910922 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.912636 4734 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-bmmjg container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.912671 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" event={"ID":"90a4f17b-b8b7-4aee-95c6-2c1ab0c3ebe0","Type":"ContainerStarted","Data":"e15491ee2e1d74c12cffbd4a5e7a370ccafc73add4e09c2d8f51062b9ab9bc18"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.912719 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" podUID="2af950d7-5574-421c-a368-2f4a74be9344" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.915338 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" event={"ID":"61b7ec5c-9240-4232-b303-6a4978e53beb","Type":"ContainerStarted","Data":"eb5010c9c539b77bd9a5bed2b11281d3c3473dfee99d274683f446b68752a66c"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.917104 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" event={"ID":"a07365e2-39f2-47a6-b55d-38999cba5ea3","Type":"ContainerStarted","Data":"fdccf4aa0420979574097b37c27464929913dc854390b00b87493b1096d59414"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.917317 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" event={"ID":"a07365e2-39f2-47a6-b55d-38999cba5ea3","Type":"ContainerStarted","Data":"91f589da56f98cae7169236f7029533ea732cd4e1d40b5a998d00606530d98df"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.920276 
4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" event={"ID":"93b2feab-2bfb-4bf2-b63d-b8cd5253509f","Type":"ContainerStarted","Data":"7ad6fb58047bd799fb80169b122b6e7a96823600dc16425513e477490e0b5694"} Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.920840 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.922475 4734 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ncw46 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.922530 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" podUID="96ac3c5a-8e5f-41f6-9668-ae0ac1f47326" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.922631 4734 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-59nlg container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" start-of-body= Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.922699 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" podUID="e42c2e28-78c7-400d-97f3-ba7ac7336c1d" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.20:5443/healthz\": dial tcp 10.217.0.20:5443: connect: connection refused" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.952355 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-kh9hk" podStartSLOduration=5.952310549 podStartE2EDuration="5.952310549s" podCreationTimestamp="2025-11-25 09:30:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.952286208 +0000 UTC m=+160.762748202" watchObservedRunningTime="2025-11-25 09:30:37.952310549 +0000 UTC m=+160.762772543" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.953024 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" podStartSLOduration=135.95301246 podStartE2EDuration="2m15.95301246s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.930561206 +0000 UTC m=+160.741023210" watchObservedRunningTime="2025-11-25 09:30:37.95301246 +0000 UTC m=+160.763474444" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.968854 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rmm42" podStartSLOduration=135.968835298 podStartE2EDuration="2m15.968835298s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.968442306 +0000 UTC m=+160.778904320" watchObservedRunningTime="2025-11-25 09:30:37.968835298 +0000 UTC m=+160.779297292" Nov 25 09:30:37 crc kubenswrapper[4734]: I1125 09:30:37.989243 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-xzqtm" podStartSLOduration=135.989223141 podStartE2EDuration="2m15.989223141s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:37.986708337 +0000 UTC m=+160.797170331" watchObservedRunningTime="2025-11-25 09:30:37.989223141 +0000 UTC m=+160.799685135" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.007918 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.010669 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.510646855 +0000 UTC m=+161.321108849 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.062445 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-hf5p7" podStartSLOduration=135.062415697 podStartE2EDuration="2m15.062415697s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.041066565 +0000 UTC m=+160.851528569" watchObservedRunningTime="2025-11-25 09:30:38.062415697 +0000 UTC m=+160.872877691" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.069526 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-86g8w" podStartSLOduration=135.069498857 podStartE2EDuration="2m15.069498857s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.067179248 +0000 UTC m=+160.877641262" watchObservedRunningTime="2025-11-25 09:30:38.069498857 +0000 UTC m=+160.879960851" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.105486 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ktlr9" podStartSLOduration=136.105466011 podStartE2EDuration="2m16.105466011s" 
podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.102213705 +0000 UTC m=+160.912675709" watchObservedRunningTime="2025-11-25 09:30:38.105466011 +0000 UTC m=+160.915928005" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.112949 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.113280 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.613267572 +0000 UTC m=+161.423729556 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.160120 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.171705 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-mr42k" podStartSLOduration=7.17168263 podStartE2EDuration="7.17168263s" podCreationTimestamp="2025-11-25 09:30:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.140039014 +0000 UTC m=+160.950501038" watchObservedRunningTime="2025-11-25 09:30:38.17168263 +0000 UTC m=+160.982144624" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.171905 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-nbrjt" podStartSLOduration=136.171901527 podStartE2EDuration="2m16.171901527s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.16963668 +0000 UTC m=+160.980098694" watchObservedRunningTime="2025-11-25 09:30:38.171901527 +0000 UTC m=+160.982363521" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.182039 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.183393 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 25 09:30:38 crc 
kubenswrapper[4734]: I1125 09:30:38.183458 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.202287 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" podStartSLOduration=135.202268365 podStartE2EDuration="2m15.202268365s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.198072891 +0000 UTC m=+161.008534885" watchObservedRunningTime="2025-11-25 09:30:38.202268365 +0000 UTC m=+161.012730359" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.214564 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.214958 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.71492203 +0000 UTC m=+161.525384024 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.215222 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.215778 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.715757524 +0000 UTC m=+161.526219518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.238316 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-pc9l6" podStartSLOduration=136.238279521 podStartE2EDuration="2m16.238279521s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.225526744 +0000 UTC m=+161.035988748" watchObservedRunningTime="2025-11-25 09:30:38.238279521 +0000 UTC m=+161.048741515" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.262389 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" podStartSLOduration=135.262359843 podStartE2EDuration="2m15.262359843s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.261419296 +0000 UTC m=+161.071881290" watchObservedRunningTime="2025-11-25 09:30:38.262359843 +0000 UTC m=+161.072821837" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.314755 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" podStartSLOduration=136.314733873 podStartE2EDuration="2m16.314733873s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.312005812 +0000 UTC m=+161.122467806" watchObservedRunningTime="2025-11-25 09:30:38.314733873 +0000 UTC m=+161.125195867" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.316561 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.316913 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.816900157 +0000 UTC m=+161.627362151 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.393292 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-nr7rm" podStartSLOduration=136.393271637 podStartE2EDuration="2m16.393271637s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.390723792 +0000 UTC m=+161.201185786" watchObservedRunningTime="2025-11-25 09:30:38.393271637 +0000 UTC m=+161.203733631" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.394072 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" podStartSLOduration=136.394064891 podStartE2EDuration="2m16.394064891s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.366612328 +0000 UTC m=+161.177074332" watchObservedRunningTime="2025-11-25 09:30:38.394064891 +0000 UTC m=+161.204526885" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.418106 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.418615 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:38.918601627 +0000 UTC m=+161.729063611 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.419248 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-pnvcs" podStartSLOduration=136.419208395 podStartE2EDuration="2m16.419208395s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.419021399 +0000 UTC m=+161.229483393" watchObservedRunningTime="2025-11-25 09:30:38.419208395 +0000 UTC m=+161.229670389" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.453042 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" podStartSLOduration=38.453009985 podStartE2EDuration="38.453009985s" podCreationTimestamp="2025-11-25 09:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.450247343 +0000 UTC m=+161.260709357" watchObservedRunningTime="2025-11-25 09:30:38.453009985 +0000 UTC m=+161.263471979" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.474689 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pgxfp" podStartSLOduration=136.474659375 podStartE2EDuration="2m16.474659375s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.473986235 +0000 UTC m=+161.284448239" watchObservedRunningTime="2025-11-25 09:30:38.474659375 +0000 UTC m=+161.285121369" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.501180 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-vflz8" podStartSLOduration=136.501142139 podStartE2EDuration="2m16.501142139s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.49913979 +0000 UTC m=+161.309601784" watchObservedRunningTime="2025-11-25 09:30:38.501142139 +0000 UTC m=+161.311604133" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.519729 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.519965 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.019926755 +0000 UTC m=+161.830388759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.520207 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.520648 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.020632346 +0000 UTC m=+161.831094340 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.539138 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-c48tj" podStartSLOduration=136.539078281 podStartE2EDuration="2m16.539078281s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.538547306 +0000 UTC m=+161.349009290" watchObservedRunningTime="2025-11-25 09:30:38.539078281 +0000 UTC m=+161.349564246" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.621897 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.622278 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.122259193 +0000 UTC m=+161.932721187 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.722850 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.723279 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.223264452 +0000 UTC m=+162.033726446 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.875171 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.375153516 +0000 UTC m=+162.185615510 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.875072 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.875354 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.875685 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.375677701 +0000 UTC m=+162.186139695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.929356 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-2phng" event={"ID":"665eefbd-7f4c-485c-9822-f4f3cc1a67a5","Type":"ContainerStarted","Data":"d7f1c9733eaba96b0d37a536825e645359c908a42c9ef3bc8f67aad9f4562d75"} Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.930170 4734 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-4qpf8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.930201 4734 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ncw46 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.930213 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection 
refused" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.930246 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" podUID="96ac3c5a-8e5f-41f6-9668-ae0ac1f47326" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.930342 4734 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-bmmjg container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.930386 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" podUID="2af950d7-5574-421c-a368-2f4a74be9344" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.984916 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.986518 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.48648548 +0000 UTC m=+162.296947484 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:38 crc kubenswrapper[4734]: I1125 09:30:38.986908 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:38 crc kubenswrapper[4734]: E1125 09:30:38.996589 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.496563098 +0000 UTC m=+162.307025092 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.006265 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9h7bw" podStartSLOduration=137.006223064 podStartE2EDuration="2m17.006223064s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.952416472 +0000 UTC m=+161.762878456" watchObservedRunningTime="2025-11-25 09:30:39.006223064 +0000 UTC m=+161.816685068" Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.007700 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-2phng" podStartSLOduration=137.007691628 podStartE2EDuration="2m17.007691628s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:38.98379296 +0000 UTC m=+161.794254954" watchObservedRunningTime="2025-11-25 09:30:39.007691628 +0000 UTC m=+161.818153622" Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.027574 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" podStartSLOduration=136.027555985 podStartE2EDuration="2m16.027555985s" podCreationTimestamp="2025-11-25 09:28:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:39.023870156 +0000 UTC m=+161.834332150" watchObservedRunningTime="2025-11-25 09:30:39.027555985 +0000 UTC m=+161.838017979" Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.050759 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b9pbx" podStartSLOduration=137.050739931 podStartE2EDuration="2m17.050739931s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:39.049173725 +0000 UTC m=+161.859635739" watchObservedRunningTime="2025-11-25 09:30:39.050739931 +0000 UTC m=+161.861201925" Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.073319 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-ddc5m" podStartSLOduration=137.073297449 podStartE2EDuration="2m17.073297449s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:39.070494076 +0000 UTC m=+161.880956080" watchObservedRunningTime="2025-11-25 09:30:39.073297449 +0000 UTC m=+161.883759453" Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.088780 4734 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.090643 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.590608021 +0000 UTC m=+162.401070015 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.178243 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.178334 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.192193 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.192743 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.692720713 +0000 UTC m=+162.503182707 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.293254 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.293469 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.793440273 +0000 UTC m=+162.603902267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.293502 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.293833 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.793821764 +0000 UTC m=+162.604283758 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.395108 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.395269 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.895247175 +0000 UTC m=+162.705709169 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.396113 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.396482 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.896473142 +0000 UTC m=+162.706935136 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.497500 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.498002 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:39.997984955 +0000 UTC m=+162.808446949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.598779 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.599273 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.099256872 +0000 UTC m=+162.909718866 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.700286 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.700414 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.200395055 +0000 UTC m=+163.010857059 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.700978 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.701684 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.201649482 +0000 UTC m=+163.012111646 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.802544 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.803017 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.302998641 +0000 UTC m=+163.113460635 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:39 crc kubenswrapper[4734]: I1125 09:30:39.904627 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:39 crc kubenswrapper[4734]: E1125 09:30:39.904993 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.404979088 +0000 UTC m=+163.215441082 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.006258 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:40 crc kubenswrapper[4734]: E1125 09:30:40.006562 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.506519873 +0000 UTC m=+163.316981877 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.007307 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:40 crc kubenswrapper[4734]: E1125 09:30:40.007925 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.507816841 +0000 UTC m=+163.318278835 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.108414 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:40 crc kubenswrapper[4734]: E1125 09:30:40.108668 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.608625384 +0000 UTC m=+163.419087378 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.109591 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:40 crc kubenswrapper[4734]: E1125 09:30:40.109749 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:40.609731107 +0000 UTC m=+163.420193151 (durationBeforeRetry 500ms). 
Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.177932 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.178479 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.946774 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" event={"ID":"f98607ff-98e0-4c9b-836a-80db389b4529","Type":"ContainerStarted","Data":"9ca47d76ba830bb502d2308daf1ec58bb91da576bd1466b2ec5df327f82c85a1"}
Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.950524 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-m4rhm" event={"ID":"efcf38db-26a3-43b0-95d3-60578dd458e8","Type":"ContainerStarted","Data":"5fed92a948c9763356f9dad18f588422b9963a48d38b2eefdca42d07d7ece819"}
Nov 25 09:30:40 crc kubenswrapper[4734]: I1125 09:30:40.950807 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-m4rhm"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.062140 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-67tg6" podStartSLOduration=139.062057165 podStartE2EDuration="2m19.062057165s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:41.012998123 +0000 UTC m=+163.823460117" watchObservedRunningTime="2025-11-25 09:30:41.062057165 +0000 UTC m=+163.872519159"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.192721 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:41 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld
Nov 25 09:30:41 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:41 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.192843 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.408183 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-m4rhm" podStartSLOduration=9.408166906 podStartE2EDuration="9.408166906s" podCreationTimestamp="2025-11-25 09:30:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:41.064698443 +0000 UTC m=+163.875160437" watchObservedRunningTime="2025-11-25 09:30:41.408166906 +0000 UTC m=+164.218628900"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.409696 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.410488 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.415440 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.415617 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.461525 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.532931 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/229cbb7c-c097-4228-825f-8d75e0e87d43-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.533107 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/229cbb7c-c097-4228-825f-8d75e0e87d43-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.634504 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/229cbb7c-c097-4228-825f-8d75e0e87d43-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.634529 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/229cbb7c-c097-4228-825f-8d75e0e87d43-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.635025 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/229cbb7c-c097-4228-825f-8d75e0e87d43-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.702683 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/229cbb7c-c097-4228-825f-8d75e0e87d43-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.706666 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.729901 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.871373 4734 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-2jfg2 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body=
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.871477 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" podUID="1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused"
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.871757 4734 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-2jfg2 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body=
Nov 25 09:30:41 crc kubenswrapper[4734]: I1125 09:30:41.871859 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" podUID="1b9441f8-90cf-4d2d-9e89-41a8a2bd4a5f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused"
Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.040007 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" event={"ID":"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6","Type":"ContainerStarted","Data":"7382f25a779def468db93c475a1153ca5b064dd8bc0553bd9159c112cf3fb5c2"}
Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.184461 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:42 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld
Nov 25 09:30:42 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:42 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.184521 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.485066 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.584044 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.584143 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.605971 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:42 crc kubenswrapper[4734]: E1125 09:30:42.608208 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.108194225 +0000 UTC m=+165.918656219 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.625796 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.650962 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-5jtcs" Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.707270 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:42 crc kubenswrapper[4734]: E1125 09:30:42.708742 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.208708499 +0000 UTC m=+166.019170483 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.752066 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.810198 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.810271 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.810294 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.810349 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.811744 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:42 crc kubenswrapper[4734]: E1125 09:30:42.814413 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.314389746 +0000 UTC m=+166.124851810 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.866501 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:30:42 crc kubenswrapper[4734]: I1125 09:30:42.918131 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:42 crc kubenswrapper[4734]: E1125 09:30:42.919771 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.419744224 +0000 UTC m=+166.230206218 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.021477 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.022010 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.521991069 +0000 UTC m=+166.332453053 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.060832 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k44hp"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.062126 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.072247 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"229cbb7c-c097-4228-825f-8d75e0e87d43","Type":"ContainerStarted","Data":"1bf456ff15b8c3bd7c8184450af598a65879524e280a7b5566a968969de966a6"} Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.110414 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.122492 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k44hp"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.122666 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gdg5c"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.123278 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.123702 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-utilities\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.123769 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-catalog-content\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.123889 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hglvt\" (UniqueName: \"kubernetes.io/projected/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-kube-api-access-hglvt\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.124030 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.624007338 +0000 UTC m=+166.434469332 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.124129 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.137966 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.206376 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:30:43 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld Nov 25 09:30:43 crc kubenswrapper[4734]: [+]process-running ok Nov 25 09:30:43 crc kubenswrapper[4734]: healthz check failed Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.206438 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.226819 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-catalog-content\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.226875 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.226897 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-utilities\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.226921 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-utilities\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.226945 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdc2g\" (UniqueName: \"kubernetes.io/projected/64417a45-ee86-4f32-8f68-7d17d9e493cc-kube-api-access-sdc2g\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.226982 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-catalog-content\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " 
pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.227045 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hglvt\" (UniqueName: \"kubernetes.io/projected/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-kube-api-access-hglvt\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.227667 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.727654505 +0000 UTC m=+166.538116499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.228174 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-utilities\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.228426 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-catalog-content\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.236616 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gdg5c"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.294962 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hglvt\" (UniqueName: \"kubernetes.io/projected/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-kube-api-access-hglvt\") pod \"certified-operators-k44hp\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.332258 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.332662 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-catalog-content\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.332733 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-utilities\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.332770 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdc2g\" (UniqueName: \"kubernetes.io/projected/64417a45-ee86-4f32-8f68-7d17d9e493cc-kube-api-access-sdc2g\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.333234 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.833204348 +0000 UTC m=+166.643666342 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.333851 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-catalog-content\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.334785 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-utilities\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.335204 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k7vf4"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.336535 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.404685 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.419367 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdc2g\" (UniqueName: \"kubernetes.io/projected/64417a45-ee86-4f32-8f68-7d17d9e493cc-kube-api-access-sdc2g\") pod \"community-operators-gdg5c\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.435624 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2n5k\" (UniqueName: \"kubernetes.io/projected/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-kube-api-access-x2n5k\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.435752 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-utilities\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.435782 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-catalog-content\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.435819 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.436372 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:43.93634561 +0000 UTC m=+166.746807774 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.444331 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k7vf4"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.462944 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.497528 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ct4kr"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.504468 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.537809 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.538161 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.038107181 +0000 UTC m=+166.848569175 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538222 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-utilities\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538277 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2n5k\" (UniqueName: \"kubernetes.io/projected/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-kube-api-access-x2n5k\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538452 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-utilities\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538513 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-catalog-content\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538546 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538573 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-catalog-content\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.538594 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r56nq\" (UniqueName: \"kubernetes.io/projected/8e35bc80-2357-4dd8-85f0-2a88916f13a8-kube-api-access-r56nq\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.539136 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-utilities\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.539188 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.039177412 +0000 UTC m=+166.849639396 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.539445 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-catalog-content\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.578887 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ct4kr"] Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.641508 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2n5k\" (UniqueName: \"kubernetes.io/projected/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-kube-api-access-x2n5k\") pod \"certified-operators-k7vf4\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.665421 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.665565 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.165538341 +0000 UTC m=+166.976000335 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.665917 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-catalog-content\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.665967 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.665990 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r56nq\" (UniqueName: \"kubernetes.io/projected/8e35bc80-2357-4dd8-85f0-2a88916f13a8-kube-api-access-r56nq\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.666091 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-utilities\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.666811 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-utilities\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.667059 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-catalog-content\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.667416 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.167398436 +0000 UTC m=+166.977860430 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.708215 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.754808 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r56nq\" (UniqueName: \"kubernetes.io/projected/8e35bc80-2357-4dd8-85f0-2a88916f13a8-kube-api-access-r56nq\") pod \"community-operators-ct4kr\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.783578 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.784299 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.284280445 +0000 UTC m=+167.094742429 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.881605 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.886143 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.886564 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.386549521 +0000 UTC m=+167.197011515 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:43 crc kubenswrapper[4734]: I1125 09:30:43.988300 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:43 crc kubenswrapper[4734]: E1125 09:30:43.988745 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.488726165 +0000 UTC m=+167.299188159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.039796 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gdg5c"] Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.091049 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.091554 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.591534717 +0000 UTC m=+167.401996721 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.091995 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.092047 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-6m6pm" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.105792 4734 patch_prober.go:28] interesting pod/console-f9d7485db-6m6pm container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.105882 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-6m6pm" podUID="3c14528b-b8c0-4b6d-90c8-95904704d096" containerName="console" probeResult="failure" output="Get \"https://10.217.0.15:8443/health\": dial tcp 10.217.0.15:8443: connect: connection refused" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.123832 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"229cbb7c-c097-4228-825f-8d75e0e87d43","Type":"ContainerStarted","Data":"875ebe110663443e7cfc0ebdff89ce6eb12891e5bc0e87be11a2de24e70c36f9"} Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.155529 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncw46" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.170283 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k44hp"] Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.194635 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.196794 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.69675982 +0000 UTC m=+167.507221814 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.218389 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:30:44 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld Nov 25 09:30:44 crc kubenswrapper[4734]: [+]process-running ok Nov 25 09:30:44 crc kubenswrapper[4734]: healthz check failed Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.218436 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.225769 4734 patch_prober.go:28] interesting pod/apiserver-76f77b778f-64gk2 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]log ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]etcd ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/max-in-flight-filter ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 25 09:30:44 crc kubenswrapper[4734]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 25 09:30:44 crc kubenswrapper[4734]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/project.openshift.io-projectcache ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/openshift.io-startinformers ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 25 09:30:44 crc kubenswrapper[4734]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 09:30:44 crc kubenswrapper[4734]: livez check failed Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.225843 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-64gk2" podUID="93b2feab-2bfb-4bf2-b63d-b8cd5253509f" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.301192 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.301597 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.801583532 +0000 UTC m=+167.612045526 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.403505 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.404000 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:44.903980982 +0000 UTC m=+167.714442976 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.498424 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-59nlg" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.515875 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.516266 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.016252603 +0000 UTC m=+167.826714597 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.616587 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.617415 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.619098 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.119078145 +0000 UTC m=+167.929540139 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.653461 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/54363663-3559-4203-bf8f-03e3bf4d1127-metrics-certs\") pod \"network-metrics-daemon-tfr8m\" (UID: \"54363663-3559-4203-bf8f-03e3bf4d1127\") " pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.664582 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-tfr8m" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.721485 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.722301 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.222278859 +0000 UTC m=+168.032740853 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.747451 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ct4kr"] Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.794074 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.801996 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-n7f6b" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.823071 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.823637 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.323605367 +0000 UTC m=+168.134067361 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:44 crc kubenswrapper[4734]: W1125 09:30:44.829254 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e35bc80_2357_4dd8_85f0_2a88916f13a8.slice/crio-5fbe1d3e31833c30735e46d9f573994316eee9b4476fcdea9f615f1901415370 WatchSource:0}: Error finding container 5fbe1d3e31833c30735e46d9f573994316eee9b4476fcdea9f615f1901415370: Status 404 returned error can't find the container with id 5fbe1d3e31833c30735e46d9f573994316eee9b4476fcdea9f615f1901415370 Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.852211 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k7vf4"] Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.904502 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2jfg2" Nov 25 09:30:44 crc kubenswrapper[4734]: I1125 09:30:44.924258 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:44 crc kubenswrapper[4734]: E1125 09:30:44.925572 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.425559134 +0000 UTC m=+168.236021128 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.025967 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.026981 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.526949344 +0000 UTC m=+168.337411358 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.060294 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.067410 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9sdhr"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.074201 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.120803 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.128936 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qds45\" (UniqueName: \"kubernetes.io/projected/0cfee19b-29a3-4816-b501-e2420d36b371-kube-api-access-qds45\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.129033 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-utilities\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.129121 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-catalog-content\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.129479 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.129815 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.629797668 +0000 UTC m=+168.440259662 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.143278 4734 generic.go:334] "Generic (PLEG): container finished" podID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerID="391a89151ababc98f6f3d4f2db4752d8791b779aba6a94d1571f5cea3a1caf6c" exitCode=0 Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.143364 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdg5c" event={"ID":"64417a45-ee86-4f32-8f68-7d17d9e493cc","Type":"ContainerDied","Data":"391a89151ababc98f6f3d4f2db4752d8791b779aba6a94d1571f5cea3a1caf6c"} Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.143437 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdg5c" event={"ID":"64417a45-ee86-4f32-8f68-7d17d9e493cc","Type":"ContainerStarted","Data":"8d4bfc69c74eec894d930e7fff7a1a75049e81d40e229bbb2f9b48a9d83a245a"} Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.158938 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9sdhr"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.176940 4734 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.177639 4734 generic.go:334] "Generic (PLEG): container finished" podID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerID="143aa931fb884bfcf9fbb1d9908f9def036f98e0b36a2334015ee4dcf4cfad21" exitCode=0 Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.177724 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k44hp" event={"ID":"47c1c075-b2f5-4370-b399-2d0fdfc66d3e","Type":"ContainerDied","Data":"143aa931fb884bfcf9fbb1d9908f9def036f98e0b36a2334015ee4dcf4cfad21"} Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.177752 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k44hp" event={"ID":"47c1c075-b2f5-4370-b399-2d0fdfc66d3e","Type":"ContainerStarted","Data":"6fe5d518b3ca00ac61b819d34dc6ff8929c4d84219ef8b5c14f2cbc55ea4eac3"} Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.178849 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-bmmjg" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.179891 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-pc9l6" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.182960 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-tfr8m"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.192555 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.203222 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup 
probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:45 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld
Nov 25 09:30:45 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:45 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.203320 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.213442 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ct4kr" event={"ID":"8e35bc80-2357-4dd8-85f0-2a88916f13a8","Type":"ContainerStarted","Data":"5fbe1d3e31833c30735e46d9f573994316eee9b4476fcdea9f615f1901415370"}
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.231770 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.231935 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-utilities\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr"
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.231961 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-catalog-content\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr"
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.232113 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qds45\" (UniqueName: \"kubernetes.io/projected/0cfee19b-29a3-4816-b501-e2420d36b371-kube-api-access-qds45\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr"
Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.232360 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.732346092 +0000 UTC m=+168.542808076 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
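
The router's startup-probe output captured above follows the aggregated healthz format: one [+]/[-] line per named check (backend-http, has-synced, process-running), then an overall verdict, served with HTTP 500 while any check fails, which is exactly what prober.go reports. A rough Go sketch of a handler producing this shape, under the assumption that the endpoint aggregates named checks this way; the names and types are invented for the sketch and this is not the router's actual code:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    // check is one named health check, like backend-http or has-synced above.
    type check struct {
        name string
        run  func() error
    }

    // healthzHandler renders "[+]name ok" / "[-]name failed: reason withheld"
    // per check and returns 500 with "healthz check failed" if any check fails.
    func healthzHandler(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            var b strings.Builder
            failed := false
            for _, c := range checks {
                if err := c.run(); err != nil {
                    failed = true
                    fmt.Fprintf(&b, "[-]%s failed: reason withheld\n", c.name)
                } else {
                    fmt.Fprintf(&b, "[+]%s ok\n", c.name)
                }
            }
            if failed {
                b.WriteString("healthz check failed\n")
                http.Error(w, b.String(), http.StatusInternalServerError) // the 500 the probe saw
                return
            }
            fmt.Fprint(w, b.String())
        }
    }

    func main() {
        http.Handle("/healthz", healthzHandler([]check{
            {"backend-http", func() error { return fmt.Errorf("not ready") }},
            {"has-synced", func() error { return fmt.Errorf("not ready") }},
            {"process-running", func() error { return nil }},
        }))
        _ = http.ListenAndServe(":9090", nil)
    }

Because this is a startup probe, the kubelet keeps retrying without restarting the container until the probe's failure threshold is exhausted.
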
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.233614 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-utilities\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr"
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.234236 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-catalog-content\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr"
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.241580 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" event={"ID":"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6","Type":"ContainerStarted","Data":"a4127a7f73f9c383c1f8845006422ab8f65a2880e5f36a5abc19bf97744bb417"}
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.286527 4734 generic.go:334] "Generic (PLEG): container finished" podID="229cbb7c-c097-4228-825f-8d75e0e87d43" containerID="875ebe110663443e7cfc0ebdff89ce6eb12891e5bc0e87be11a2de24e70c36f9" exitCode=0
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.286952 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"229cbb7c-c097-4228-825f-8d75e0e87d43","Type":"ContainerDied","Data":"875ebe110663443e7cfc0ebdff89ce6eb12891e5bc0e87be11a2de24e70c36f9"}
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.291034 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qds45\" (UniqueName: \"kubernetes.io/projected/0cfee19b-29a3-4816-b501-e2420d36b371-kube-api-access-qds45\") pod \"redhat-marketplace-9sdhr\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " pod="openshift-marketplace/redhat-marketplace-9sdhr"
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.306904 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerStarted","Data":"5e3dd45b668c1664849d79e548f09d29e41e84343bb8988843ab62bcc361969c"}
Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.338658 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.340869 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed.
No retries permitted until 2025-11-25 09:30:45.840849923 +0000 UTC m=+168.651312097 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.439720 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.441783 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:45.940060318 +0000 UTC m=+168.750522312 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.444116 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.465367 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jmrhz"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.475183 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.493413 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jmrhz"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.541467 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ssz7\" (UniqueName: \"kubernetes.io/projected/9925f447-5834-4ade-bda0-61901cd4af1a-kube-api-access-4ssz7\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.541524 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-utilities\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.541560 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-catalog-content\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.541594 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.541983 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.041967394 +0000 UTC m=+168.852429578 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.642983 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.643236 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ssz7\" (UniqueName: \"kubernetes.io/projected/9925f447-5834-4ade-bda0-61901cd4af1a-kube-api-access-4ssz7\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.643271 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-utilities\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.643307 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-catalog-content\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.643928 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-catalog-content\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.644019 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.143999503 +0000 UTC m=+168.954461507 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.644602 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-utilities\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.670763 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ssz7\" (UniqueName: \"kubernetes.io/projected/9925f447-5834-4ade-bda0-61901cd4af1a-kube-api-access-4ssz7\") pod \"redhat-marketplace-jmrhz\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.713549 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.716955 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.718738 4734 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.724134 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.727698 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.730168 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.744762 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffc24dff-415c-4699-a097-7d1bb364ce56-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.744870 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffc24dff-415c-4699-a097-7d1bb364ce56-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.744926 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: 
\"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.745520 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.245453555 +0000 UTC m=+169.055915549 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.843267 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.845741 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.846218 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffc24dff-415c-4699-a097-7d1bb364ce56-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.846341 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffc24dff-415c-4699-a097-7d1bb364ce56-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.846439 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffc24dff-415c-4699-a097-7d1bb364ce56-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.846562 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.346536076 +0000 UTC m=+169.156998070 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.852792 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.864782 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9sdhr"] Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.877051 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffc24dff-415c-4699-a097-7d1bb364ce56-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.947631 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/229cbb7c-c097-4228-825f-8d75e0e87d43-kubelet-dir\") pod \"229cbb7c-c097-4228-825f-8d75e0e87d43\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.947822 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/229cbb7c-c097-4228-825f-8d75e0e87d43-kube-api-access\") pod \"229cbb7c-c097-4228-825f-8d75e0e87d43\" (UID: \"229cbb7c-c097-4228-825f-8d75e0e87d43\") " Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.948112 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:45 crc kubenswrapper[4734]: E1125 09:30:45.948593 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.448573795 +0000 UTC m=+169.259035789 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.948806 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/229cbb7c-c097-4228-825f-8d75e0e87d43-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "229cbb7c-c097-4228-825f-8d75e0e87d43" (UID: "229cbb7c-c097-4228-825f-8d75e0e87d43"). 
InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:30:45 crc kubenswrapper[4734]: I1125 09:30:45.960893 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/229cbb7c-c097-4228-825f-8d75e0e87d43-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "229cbb7c-c097-4228-825f-8d75e0e87d43" (UID: "229cbb7c-c097-4228-825f-8d75e0e87d43"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.049966 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.050992 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/229cbb7c-c097-4228-825f-8d75e0e87d43-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.051011 4734 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/229cbb7c-c097-4228-825f-8d75e0e87d43-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:46 crc kubenswrapper[4734]: E1125 09:30:46.051179 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.55115214 +0000 UTC m=+169.361614134 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.067735 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-md25m"] Nov 25 09:30:46 crc kubenswrapper[4734]: E1125 09:30:46.068287 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="229cbb7c-c097-4228-825f-8d75e0e87d43" containerName="pruner" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.068315 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="229cbb7c-c097-4228-825f-8d75e0e87d43" containerName="pruner" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.068426 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="229cbb7c-c097-4228-825f-8d75e0e87d43" containerName="pruner" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.069389 4734 util.go:30] "No sandbox for pod can be found. 
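
The TearDown-succeeded and "Volume detached" records above show the reconciler draining the actual state of the world for the finished revision-pruner pod, while the CSI-backed PVC stays stuck until its driver appears. A compact sketch of the desired-vs-actual loop implied by these reconciler_common lines; the types are invented for the sketch and this is not the kubelet's volume manager source:

    package main

    import "fmt"

    type volumeName string

    // reconcile drives the actual set of mounted volumes toward the desired
    // set: unmount what is no longer desired, mount what is missing. Failed
    // operations are simply left for the next pass, which is why the stuck
    // CSI volume above reappears in every reconcile cycle.
    func reconcile(desired, actual map[volumeName]bool,
        mount func(volumeName) error, unmount func(volumeName) error) {
        for v := range actual {
            if !desired[v] {
                fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", v)
                if err := unmount(v); err != nil {
                    fmt.Printf("UnmountVolume.TearDown failed for volume %q: %v\n", v, err)
                    continue // stays in actual state; retried on a later pass
                }
                delete(actual, v)
                fmt.Printf("Volume detached for volume %q\n", v)
            }
        }
        for v := range desired {
            if !actual[v] {
                fmt.Printf("operationExecutor.MountVolume started for volume %q\n", v)
                if err := mount(v); err != nil {
                    fmt.Printf("MountVolume.MountDevice failed for volume %q: %v\n", v, err)
                    continue
                }
                actual[v] = true
            }
        }
    }

    func main() {
        desired := map[volumeName]bool{"pvc-657094db": true}
        actual := map[volumeName]bool{"kubelet-dir": true}
        ok := func(volumeName) error { return nil }
        reconcile(desired, actual, ok, ok)
        fmt.Println("actual state now:", actual)
    }

The RemoveStaleState lines that follow are the cpu and memory managers dropping their per-container bookkeeping once the pruner container is gone.
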
Need to start a new one" pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.077047 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.114200 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-md25m"] Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.124138 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.152512 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-utilities\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.152616 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-catalog-content\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.152737 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dvzv\" (UniqueName: \"kubernetes.io/projected/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-kube-api-access-5dvzv\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.152798 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:46 crc kubenswrapper[4734]: E1125 09:30:46.153200 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.653184219 +0000 UTC m=+169.463646383 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.182165 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:30:46 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld Nov 25 09:30:46 crc kubenswrapper[4734]: [+]process-running ok Nov 25 09:30:46 crc kubenswrapper[4734]: healthz check failed Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.182260 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.254299 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.254665 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dvzv\" (UniqueName: \"kubernetes.io/projected/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-kube-api-access-5dvzv\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.254781 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-utilities\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.254822 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-catalog-content\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.255436 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-catalog-content\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: E1125 09:30:46.255731 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 09:30:46.755699033 +0000 UTC m=+169.566161177 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.255748 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-utilities\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.291468 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dvzv\" (UniqueName: \"kubernetes.io/projected/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-kube-api-access-5dvzv\") pod \"redhat-operators-md25m\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.297662 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jmrhz"] Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.356482 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:46 crc kubenswrapper[4734]: E1125 09:30:46.356913 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.856896437 +0000 UTC m=+169.667358431 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pmwh4" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.364321 4734 generic.go:334] "Generic (PLEG): container finished" podID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerID="f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be" exitCode=0 Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.364843 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ct4kr" event={"ID":"8e35bc80-2357-4dd8-85f0-2a88916f13a8","Type":"ContainerDied","Data":"f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.380541 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" event={"ID":"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6","Type":"ContainerStarted","Data":"84d5fb7472669791ebe6bde45893fc64e2912c9791e43c494bf36944f4bb9541"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.380622 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" event={"ID":"a3eedf9c-efe3-4ef4-bb51-5a0a345733f6","Type":"ContainerStarted","Data":"eb59ea07513ef55dc28abf70e1e63c59396f70452a6cdda011d8d7f7acbc9e1e"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.403959 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.404417 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"229cbb7c-c097-4228-825f-8d75e0e87d43","Type":"ContainerDied","Data":"1bf456ff15b8c3bd7c8184450af598a65879524e280a7b5566a968969de966a6"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.404626 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bf456ff15b8c3bd7c8184450af598a65879524e280a7b5566a968969de966a6" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.409110 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9sdhr" event={"ID":"0cfee19b-29a3-4816-b501-e2420d36b371","Type":"ContainerStarted","Data":"892705774a83cb33cac3474472ee1630bdb3247b09ff1653470e2a748545c4af"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.409195 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9sdhr" event={"ID":"0cfee19b-29a3-4816-b501-e2420d36b371","Type":"ContainerStarted","Data":"f1d9f6dc6204dce607e01342e48f82c22f389d24dc0842ecd66ce123bac4be1d"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.416131 4734 generic.go:334] "Generic (PLEG): container finished" podID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerID="6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773" exitCode=0 Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.416225 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerDied","Data":"6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.419439 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.421739 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-dc5dm" podStartSLOduration=14.421704085 podStartE2EDuration="14.421704085s" podCreationTimestamp="2025-11-25 09:30:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:46.418892652 +0000 UTC m=+169.229354646" watchObservedRunningTime="2025-11-25 09:30:46.421704085 +0000 UTC m=+169.232166099" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.434900 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" event={"ID":"54363663-3559-4203-bf8f-03e3bf4d1127","Type":"ContainerStarted","Data":"1fde2b7c5618aeca65f8bfcae4714df9c5f1fb3a347c0fff7a4809c11e41ffad"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.435006 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" event={"ID":"54363663-3559-4203-bf8f-03e3bf4d1127","Type":"ContainerStarted","Data":"482dc33c4441166853bd9de19e9ca53dae70872588821edd1de7b7f6d6242e16"} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.467371 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:30:46 crc kubenswrapper[4734]: E1125 09:30:46.468253 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:30:46.968219921 +0000 UTC m=+169.778681915 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.469581 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nhd9g"] Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.473539 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nhd9g" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.475262 4734 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T09:30:45.718764975Z","Handler":null,"Name":""} Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.481468 4734 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.481531 4734 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.523875 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nhd9g"] Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.569491 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgm6p\" (UniqueName: \"kubernetes.io/projected/de135379-0cb7-4ec3-a16a-43e7856a7b7f-kube-api-access-hgm6p\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.569534 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.569601 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-utilities\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.569622 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-catalog-content\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g" Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.573325 4734 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.573363 4734 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.575754 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.629634 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pmwh4\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.673063 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.673538 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgm6p\" (UniqueName: \"kubernetes.io/projected/de135379-0cb7-4ec3-a16a-43e7856a7b7f-kube-api-access-hgm6p\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.673610 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-utilities\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.673630 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-catalog-content\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.674319 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-catalog-content\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.675794 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-utilities\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.707621 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.712110 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgm6p\" (UniqueName: \"kubernetes.io/projected/de135379-0cb7-4ec3-a16a-43e7856a7b7f-kube-api-access-hgm6p\") pod \"redhat-operators-nhd9g\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.750649 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-md25m"]
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.822728 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:46 crc kubenswrapper[4734]: I1125 09:30:46.868026 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nhd9g"
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.103519 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pmwh4"]
Nov 25 09:30:47 crc kubenswrapper[4734]: W1125 09:30:47.125602 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8579ba87_e7d7_41e0_8ca6_3beaee3dd354.slice/crio-7c97b9a69a0d0001ec872e8f995f5f6571cac619f74b90c0cce5e0aa6b852de6 WatchSource:0}: Error finding container 7c97b9a69a0d0001ec872e8f995f5f6571cac619f74b90c0cce5e0aa6b852de6: Status 404 returned error can't find the container with id 7c97b9a69a0d0001ec872e8f995f5f6571cac619f74b90c0cce5e0aa6b852de6
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.185172 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:47 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld
Nov 25 09:30:47 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:47 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.185230 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.192693 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nhd9g"]
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.243956 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-m4rhm"
Nov 25 09:30:47 crc kubenswrapper[4734]: W1125 09:30:47.272595 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde135379_0cb7_4ec3_a16a_43e7856a7b7f.slice/crio-a627e2fde580910306091fdaee4810580416d2d4cd47ff9a08c4de9ef5c84a94 WatchSource:0}: Error finding container a627e2fde580910306091fdaee4810580416d2d4cd47ff9a08c4de9ef5c84a94: Status 404 returned error can't find the container with id a627e2fde580910306091fdaee4810580416d2d4cd47ff9a08c4de9ef5c84a94
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.445240 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ffc24dff-415c-4699-a097-7d1bb364ce56","Type":"ContainerStarted","Data":"c18ba82405e41b3bc0e975b339a79a29ce8f85b4a803cd4f0986b8782ee7a587"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.445293 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ffc24dff-415c-4699-a097-7d1bb364ce56","Type":"ContainerStarted","Data":"ce2ddccbd6b18a2c84ffec857d8fabe501e31a6bda73aeff2e869913c0567db3"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.449064 4734 generic.go:334] "Generic (PLEG): container finished" podID="9925f447-5834-4ade-bda0-61901cd4af1a" containerID="214347f6cbc8dcc6362822b94db806942b7de73b6079fe4500576684b6ee291b" exitCode=0
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.449146 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jmrhz" event={"ID":"9925f447-5834-4ade-bda0-61901cd4af1a","Type":"ContainerDied","Data":"214347f6cbc8dcc6362822b94db806942b7de73b6079fe4500576684b6ee291b"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.449226 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jmrhz" event={"ID":"9925f447-5834-4ade-bda0-61901cd4af1a","Type":"ContainerStarted","Data":"097009b8ba493199315b15b82d802724cd72143a2a67a6cf54a0b5eb3928f5a5"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.453378 4734 generic.go:334] "Generic (PLEG): container finished" podID="0cfee19b-29a3-4816-b501-e2420d36b371" containerID="892705774a83cb33cac3474472ee1630bdb3247b09ff1653470e2a748545c4af" exitCode=0
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.453485 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9sdhr" event={"ID":"0cfee19b-29a3-4816-b501-e2420d36b371","Type":"ContainerDied","Data":"892705774a83cb33cac3474472ee1630bdb3247b09ff1653470e2a748545c4af"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.455798 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" event={"ID":"8579ba87-e7d7-41e0-8ca6-3beaee3dd354","Type":"ContainerStarted","Data":"7c97b9a69a0d0001ec872e8f995f5f6571cac619f74b90c0cce5e0aa6b852de6"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.458173 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-tfr8m" event={"ID":"54363663-3559-4203-bf8f-03e3bf4d1127","Type":"ContainerStarted","Data":"6ea2c87ae34ea6ec905dec4216017f4601ff9a6390ed7f6a8929951044547e86"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.459969 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nhd9g" event={"ID":"de135379-0cb7-4ec3-a16a-43e7856a7b7f","Type":"ContainerStarted","Data":"a627e2fde580910306091fdaee4810580416d2d4cd47ff9a08c4de9ef5c84a94"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.462776 4734 generic.go:334] "Generic (PLEG): container finished" podID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerID="cfcfec3addc5fa6fd2e9a2eb7b9ec13f858730cd082c295bfd2f2f2f4da6cbe0" exitCode=0
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.463957 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md25m" event={"ID":"09b0af8a-c69a-4ff7-9836-7f6e349c48a4","Type":"ContainerDied","Data":"cfcfec3addc5fa6fd2e9a2eb7b9ec13f858730cd082c295bfd2f2f2f4da6cbe0"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.463999 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md25m" event={"ID":"09b0af8a-c69a-4ff7-9836-7f6e349c48a4","Type":"ContainerStarted","Data":"137bf0c20304a8c2f7b41c3da031bf7328d7943b1c1111a3a6529c77350e6922"}
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.506226 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-tfr8m" podStartSLOduration=145.506205685 podStartE2EDuration="2m25.506205685s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:47.499052383 +0000 UTC m=+170.309514377" watchObservedRunningTime="2025-11-25 09:30:47.506205685 +0000 UTC m=+170.316667669"
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.594603 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:47 crc kubenswrapper[4734]: I1125 09:30:47.605313 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-64gk2"
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.182225 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:48 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld
Nov 25 09:30:48 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:48 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.182311 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.267290 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.474191 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" event={"ID":"8579ba87-e7d7-41e0-8ca6-3beaee3dd354","Type":"ContainerStarted","Data":"944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702"}
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.474291 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.479276 4734 generic.go:334] "Generic (PLEG): container finished" podID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerID="8338b713b5d35ad7b02d28212dc20de4d60fddcb73933b13a22eec5fa0f3dcec" exitCode=0
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.479384 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nhd9g" event={"ID":"de135379-0cb7-4ec3-a16a-43e7856a7b7f","Type":"ContainerDied","Data":"8338b713b5d35ad7b02d28212dc20de4d60fddcb73933b13a22eec5fa0f3dcec"}
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.492798 4734 generic.go:334] "Generic (PLEG): container finished" podID="ffc24dff-415c-4699-a097-7d1bb364ce56" containerID="c18ba82405e41b3bc0e975b339a79a29ce8f85b4a803cd4f0986b8782ee7a587" exitCode=0
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.492894 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ffc24dff-415c-4699-a097-7d1bb364ce56","Type":"ContainerDied","Data":"c18ba82405e41b3bc0e975b339a79a29ce8f85b4a803cd4f0986b8782ee7a587"}
Nov 25 09:30:48 crc kubenswrapper[4734]: I1125 09:30:48.507666 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" podStartSLOduration=146.507608565 podStartE2EDuration="2m26.507608565s" podCreationTimestamp="2025-11-25 09:28:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:30:48.498545847 +0000 UTC m=+171.309007851" watchObservedRunningTime="2025-11-25 09:30:48.507608565 +0000 UTC m=+171.318070569"
Nov 25 09:30:49 crc kubenswrapper[4734]: I1125 09:30:49.180680 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:49 crc kubenswrapper[4734]: [-]has-synced failed: reason withheld
Nov 25 09:30:49 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:49 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:49 crc kubenswrapper[4734]: I1125 09:30:49.181102 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:49 crc kubenswrapper[4734]: I1125 09:30:49.509269 4734 generic.go:334] "Generic (PLEG): container finished" podID="61b7ec5c-9240-4232-b303-6a4978e53beb" containerID="eb5010c9c539b77bd9a5bed2b11281d3c3473dfee99d274683f446b68752a66c" exitCode=0
Nov 25 09:30:49 crc kubenswrapper[4734]: I1125 09:30:49.509494 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" event={"ID":"61b7ec5c-9240-4232-b303-6a4978e53beb","Type":"ContainerDied","Data":"eb5010c9c539b77bd9a5bed2b11281d3c3473dfee99d274683f446b68752a66c"}
Nov 25 09:30:49 crc kubenswrapper[4734]: I1125 09:30:49.996201 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.047587 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffc24dff-415c-4699-a097-7d1bb364ce56-kubelet-dir\") pod \"ffc24dff-415c-4699-a097-7d1bb364ce56\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") "
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.047695 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ffc24dff-415c-4699-a097-7d1bb364ce56-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "ffc24dff-415c-4699-a097-7d1bb364ce56" (UID: "ffc24dff-415c-4699-a097-7d1bb364ce56"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.047802 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffc24dff-415c-4699-a097-7d1bb364ce56-kube-api-access\") pod \"ffc24dff-415c-4699-a097-7d1bb364ce56\" (UID: \"ffc24dff-415c-4699-a097-7d1bb364ce56\") "
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.048019 4734 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/ffc24dff-415c-4699-a097-7d1bb364ce56-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.055334 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffc24dff-415c-4699-a097-7d1bb364ce56-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "ffc24dff-415c-4699-a097-7d1bb364ce56" (UID: "ffc24dff-415c-4699-a097-7d1bb364ce56"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.149288 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ffc24dff-415c-4699-a097-7d1bb364ce56-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.180694 4734 patch_prober.go:28] interesting pod/router-default-5444994796-pc9l6 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:30:50 crc kubenswrapper[4734]: [+]has-synced ok
Nov 25 09:30:50 crc kubenswrapper[4734]: [+]process-running ok
Nov 25 09:30:50 crc kubenswrapper[4734]: healthz check failed
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.180952 4734 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-pc9l6" podUID="c673b63c-b34e-41b2-ad5f-c7b258ac9b26" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.566642 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.567281 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"ffc24dff-415c-4699-a097-7d1bb364ce56","Type":"ContainerDied","Data":"ce2ddccbd6b18a2c84ffec857d8fabe501e31a6bda73aeff2e869913c0567db3"}
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.567316 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce2ddccbd6b18a2c84ffec857d8fabe501e31a6bda73aeff2e869913c0567db3"
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.696045 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:30:50 crc kubenswrapper[4734]: I1125 09:30:50.696135 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:30:51 crc kubenswrapper[4734]: I1125 09:30:51.185447 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-pc9l6"
Nov 25 09:30:51 crc kubenswrapper[4734]: I1125 09:30:51.187876 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-pc9l6"
Nov 25 09:30:52 crc kubenswrapper[4734]: I1125 09:30:52.807801 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:30:52 crc kubenswrapper[4734]: I1125 09:30:52.808163 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:30:52 crc kubenswrapper[4734]: I1125 09:30:52.807824 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:30:52 crc kubenswrapper[4734]: I1125 09:30:52.808231 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:30:54 crc kubenswrapper[4734]: I1125 09:30:54.105443 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-6m6pm"
Nov 25 09:30:54 crc kubenswrapper[4734]: I1125 09:30:54.111059 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-6m6pm"
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.807542 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.808144 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.808199 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-dqksf"
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.808834 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"7f1af46ed7ff1aae8264c246a2edf72c96b39f0eaeb36c62503fbae471b7d849"} pod="openshift-console/downloads-7954f5f757-dqksf" containerMessage="Container download-server failed liveness probe, will be restarted"
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.808935 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" containerID="cri-o://7f1af46ed7ff1aae8264c246a2edf72c96b39f0eaeb36c62503fbae471b7d849" gracePeriod=2
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.807634 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.809113 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.809567 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:02 crc kubenswrapper[4734]: I1125 09:31:02.809638 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:04 crc kubenswrapper[4734]: I1125 09:31:04.679901 4734 generic.go:334] "Generic (PLEG): container finished" podID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerID="7f1af46ed7ff1aae8264c246a2edf72c96b39f0eaeb36c62503fbae471b7d849" exitCode=0
Nov 25 09:31:04 crc kubenswrapper[4734]: I1125 09:31:04.680436 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dqksf" event={"ID":"be35ecf5-83be-4063-9d6a-939bd1a78def","Type":"ContainerDied","Data":"7f1af46ed7ff1aae8264c246a2edf72c96b39f0eaeb36c62503fbae471b7d849"}
Nov 25 09:31:06 crc kubenswrapper[4734]: I1125 09:31:06.828509 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4"
Nov 25 09:31:10 crc kubenswrapper[4734]: I1125 09:31:10.480336 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:31:12 crc kubenswrapper[4734]: I1125 09:31:12.807773 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:12 crc kubenswrapper[4734]: I1125 09:31:12.807889 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:15 crc kubenswrapper[4734]: I1125 09:31:15.059443 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-jbtkt"
Nov 25 09:31:20 crc kubenswrapper[4734]: I1125 09:31:20.695513 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:31:20 crc kubenswrapper[4734]: I1125 09:31:20.695824 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:31:22 crc kubenswrapper[4734]: I1125 09:31:22.809211 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:22 crc kubenswrapper[4734]: I1125 09:31:22.809578 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:32 crc kubenswrapper[4734]: I1125 09:31:32.809688 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:32 crc kubenswrapper[4734]: I1125 09:31:32.810412 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:42 crc kubenswrapper[4734]: I1125 09:31:42.808342 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:42 crc kubenswrapper[4734]: I1125 09:31:42.808769 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:31:50 crc kubenswrapper[4734]: I1125 09:31:50.696029 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:31:50 crc kubenswrapper[4734]: I1125 09:31:50.696346 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:31:50 crc kubenswrapper[4734]: I1125 09:31:50.696396 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8"
Nov 25 09:31:50 crc kubenswrapper[4734]: I1125 09:31:50.696970 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:31:50 crc kubenswrapper[4734]: I1125 09:31:50.697021 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45" gracePeriod=600
Nov 25 09:31:52 crc kubenswrapper[4734]: I1125 09:31:52.807824 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:31:52 crc kubenswrapper[4734]: I1125 09:31:52.808212 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:02 crc kubenswrapper[4734]: I1125 09:32:02.808390 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:02 crc kubenswrapper[4734]: I1125 09:32:02.809142 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:05 crc kubenswrapper[4734]: I1125 09:32:05.032048 4734 generic.go:334] "Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45" exitCode=0
Nov 25 09:32:05 crc kubenswrapper[4734]: I1125 09:32:05.032169 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45"}
Nov 25 09:32:12 crc kubenswrapper[4734]: I1125 09:32:12.807573 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:12 crc kubenswrapper[4734]: I1125 09:32:12.807700 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:13 crc kubenswrapper[4734]: E1125 09:32:13.787028 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 25 09:32:13 crc kubenswrapper[4734]: E1125 09:32:13.787497 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hglvt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-k44hp_openshift-marketplace(47c1c075-b2f5-4370-b399-2d0fdfc66d3e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:13 crc kubenswrapper[4734]: E1125 09:32:13.788723 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-k44hp" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e"
Nov 25 09:32:14 crc kubenswrapper[4734]: E1125 09:32:14.806770 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-k44hp" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e"
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.850672 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn"
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.949949 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61b7ec5c-9240-4232-b303-6a4978e53beb-config-volume\") pod \"61b7ec5c-9240-4232-b303-6a4978e53beb\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") "
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.950029 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61b7ec5c-9240-4232-b303-6a4978e53beb-secret-volume\") pod \"61b7ec5c-9240-4232-b303-6a4978e53beb\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") "
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.950105 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwmf7\" (UniqueName: \"kubernetes.io/projected/61b7ec5c-9240-4232-b303-6a4978e53beb-kube-api-access-jwmf7\") pod \"61b7ec5c-9240-4232-b303-6a4978e53beb\" (UID: \"61b7ec5c-9240-4232-b303-6a4978e53beb\") "
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.950765 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61b7ec5c-9240-4232-b303-6a4978e53beb-config-volume" (OuterVolumeSpecName: "config-volume") pod "61b7ec5c-9240-4232-b303-6a4978e53beb" (UID: "61b7ec5c-9240-4232-b303-6a4978e53beb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.955623 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61b7ec5c-9240-4232-b303-6a4978e53beb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "61b7ec5c-9240-4232-b303-6a4978e53beb" (UID: "61b7ec5c-9240-4232-b303-6a4978e53beb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:32:14 crc kubenswrapper[4734]: I1125 09:32:14.957801 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61b7ec5c-9240-4232-b303-6a4978e53beb-kube-api-access-jwmf7" (OuterVolumeSpecName: "kube-api-access-jwmf7") pod "61b7ec5c-9240-4232-b303-6a4978e53beb" (UID: "61b7ec5c-9240-4232-b303-6a4978e53beb"). InnerVolumeSpecName "kube-api-access-jwmf7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:32:15 crc kubenswrapper[4734]: I1125 09:32:15.052289 4734 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/61b7ec5c-9240-4232-b303-6a4978e53beb-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:15 crc kubenswrapper[4734]: I1125 09:32:15.052339 4734 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/61b7ec5c-9240-4232-b303-6a4978e53beb-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:15 crc kubenswrapper[4734]: I1125 09:32:15.052359 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwmf7\" (UniqueName: \"kubernetes.io/projected/61b7ec5c-9240-4232-b303-6a4978e53beb-kube-api-access-jwmf7\") on node \"crc\" DevicePath \"\""
Nov 25 09:32:15 crc kubenswrapper[4734]: I1125 09:32:15.103448 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn" event={"ID":"61b7ec5c-9240-4232-b303-6a4978e53beb","Type":"ContainerDied","Data":"765fcd98933f5dd2ff6db6c7ddbdaa432736a4446080e4280663973d543c2485"}
Nov 25 09:32:15 crc kubenswrapper[4734]: I1125 09:32:15.103761 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="765fcd98933f5dd2ff6db6c7ddbdaa432736a4446080e4280663973d543c2485"
Nov 25 09:32:15 crc kubenswrapper[4734]: I1125 09:32:15.103908 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4d6wn"
Nov 25 09:32:22 crc kubenswrapper[4734]: I1125 09:32:22.808076 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:22 crc kubenswrapper[4734]: I1125 09:32:22.808846 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:27 crc kubenswrapper[4734]: E1125 09:32:27.350978 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 25 09:32:27 crc kubenswrapper[4734]: E1125 09:32:27.351451 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r56nq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-ct4kr_openshift-marketplace(8e35bc80-2357-4dd8-85f0-2a88916f13a8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:27 crc kubenswrapper[4734]: E1125 09:32:27.352674 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-ct4kr" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8"
Nov 25 09:32:28 crc kubenswrapper[4734]: E1125 09:32:28.355239 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Nov 25 09:32:28 crc kubenswrapper[4734]: E1125 09:32:28.355422 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x2n5k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-k7vf4_openshift-marketplace(accddc60-d025-4bf2-b7f4-fc7cfbc624ed): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:28 crc kubenswrapper[4734]: E1125 09:32:28.356627 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-k7vf4" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed"
Nov 25 09:32:29 crc kubenswrapper[4734]: E1125 09:32:29.210400 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-k7vf4" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed"
Nov 25 09:32:29 crc kubenswrapper[4734]: E1125 09:32:29.210501 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-ct4kr" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8"
Nov 25 09:32:32 crc kubenswrapper[4734]: E1125 09:32:32.558240 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 25 09:32:32 crc kubenswrapper[4734]: E1125 09:32:32.558399 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qds45,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-9sdhr_openshift-marketplace(0cfee19b-29a3-4816-b501-e2420d36b371): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:32 crc kubenswrapper[4734]: E1125 09:32:32.559681 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-9sdhr" podUID="0cfee19b-29a3-4816-b501-e2420d36b371"
Nov 25 09:32:32 crc kubenswrapper[4734]: I1125 09:32:32.807923 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:32 crc kubenswrapper[4734]: I1125 09:32:32.807979 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.416171 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-9sdhr" podUID="0cfee19b-29a3-4816-b501-e2420d36b371"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.490720 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.490887 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hgm6p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-nhd9g_openshift-marketplace(de135379-0cb7-4ec3-a16a-43e7856a7b7f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.492310 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-nhd9g" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.493049 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.493157 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4ssz7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-jmrhz_openshift-marketplace(9925f447-5834-4ade-bda0-61901cd4af1a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.494380 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-jmrhz" podUID="9925f447-5834-4ade-bda0-61901cd4af1a"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.512362 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.512860 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sdc2g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-gdg5c_openshift-marketplace(64417a45-ee86-4f32-8f68-7d17d9e493cc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.514041 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-gdg5c" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.550147 4734 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.550347 4734 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5dvzv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-md25m_openshift-marketplace(09b0af8a-c69a-4ff7-9836-7f6e349c48a4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 25 09:32:35 crc kubenswrapper[4734]: E1125 09:32:35.552098 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-md25m" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4"
Nov 25 09:32:36 crc kubenswrapper[4734]: I1125 09:32:36.229877 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"5eec5927f80d5fed350fbc6c560ee959f6b791701d55a7d8a01b4fa5fe3c1b43"}
Nov 25 09:32:36 crc kubenswrapper[4734]: I1125 09:32:36.233701 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-dqksf" event={"ID":"be35ecf5-83be-4063-9d6a-939bd1a78def","Type":"ContainerStarted","Data":"674bbdadc7493c87f886262808562061198a0964a43909dc04a45f33c2ded4d0"}
Nov 25 09:32:36 crc kubenswrapper[4734]: I1125 09:32:36.234910 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:36 crc kubenswrapper[4734]: I1125 09:32:36.234981 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:36 crc kubenswrapper[4734]: E1125 09:32:36.238322 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-md25m" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4"
Nov 25 09:32:36 crc kubenswrapper[4734]: E1125 09:32:36.238729 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-nhd9g" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f"
Nov 25 09:32:36 crc kubenswrapper[4734]: E1125 09:32:36.238829 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-jmrhz" podUID="9925f447-5834-4ade-bda0-61901cd4af1a"
Nov 25 09:32:36 crc kubenswrapper[4734]: E1125 09:32:36.239013 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-gdg5c" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc"
Nov 25 09:32:37 crc kubenswrapper[4734]: I1125 09:32:37.242246 4734 generic.go:334] "Generic (PLEG): container finished" podID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerID="d88f5d754d4d01f23ada11fef003012067333e3b51be414efa9ba3521f809c64" exitCode=0
Nov 25 09:32:37 crc kubenswrapper[4734]: I1125 09:32:37.242331 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k44hp" event={"ID":"47c1c075-b2f5-4370-b399-2d0fdfc66d3e","Type":"ContainerDied","Data":"d88f5d754d4d01f23ada11fef003012067333e3b51be414efa9ba3521f809c64"}
Nov 25 09:32:37 crc kubenswrapper[4734]: I1125 09:32:37.243122 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-dqksf"
Nov 25 09:32:37 crc kubenswrapper[4734]: I1125 09:32:37.243574 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:37 crc kubenswrapper[4734]: I1125 09:32:37.246258 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused"
Nov 25 09:32:38 crc kubenswrapper[4734]: I1125 09:32:38.253281 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k44hp" event={"ID":"47c1c075-b2f5-4370-b399-2d0fdfc66d3e","Type":"ContainerStarted","Data":"db3149302f43d8b4948f4a71169e312969d6beda67e8cfadb3ae6bfc8eff027a"}
Nov 25 09:32:38 crc kubenswrapper[4734]: I1125 09:32:38.254037 4734 patch_prober.go:28] interesting pod/downloads-7954f5f757-dqksf container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
Nov 25 09:32:38 crc kubenswrapper[4734]: I1125 09:32:38.254066 4734 prober.go:107] "Probe failed"
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-dqksf" podUID="be35ecf5-83be-4063-9d6a-939bd1a78def" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Nov 25 09:32:38 crc kubenswrapper[4734]: I1125 09:32:38.275196 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k44hp" podStartSLOduration=3.851052516 podStartE2EDuration="1m56.275177477s" podCreationTimestamp="2025-11-25 09:30:42 +0000 UTC" firstStartedPulling="2025-11-25 09:30:45.181300462 +0000 UTC m=+167.991762456" lastFinishedPulling="2025-11-25 09:32:37.605425423 +0000 UTC m=+280.415887417" observedRunningTime="2025-11-25 09:32:38.271829239 +0000 UTC m=+281.082291223" watchObservedRunningTime="2025-11-25 09:32:38.275177477 +0000 UTC m=+281.085639471" Nov 25 09:32:42 crc kubenswrapper[4734]: I1125 09:32:42.819357 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-dqksf" Nov 25 09:32:43 crc kubenswrapper[4734]: I1125 09:32:43.405697 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:32:43 crc kubenswrapper[4734]: I1125 09:32:43.405967 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:32:44 crc kubenswrapper[4734]: I1125 09:32:44.239807 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:32:44 crc kubenswrapper[4734]: I1125 09:32:44.329254 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:32:50 crc kubenswrapper[4734]: I1125 09:32:50.324236 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerStarted","Data":"aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6"} Nov 25 09:32:50 crc kubenswrapper[4734]: I1125 09:32:50.326005 4734 generic.go:334] "Generic (PLEG): container finished" podID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerID="e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a" exitCode=0 Nov 25 09:32:50 crc kubenswrapper[4734]: I1125 09:32:50.326041 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ct4kr" event={"ID":"8e35bc80-2357-4dd8-85f0-2a88916f13a8","Type":"ContainerDied","Data":"e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a"} Nov 25 09:32:51 crc kubenswrapper[4734]: I1125 09:32:51.342790 4734 generic.go:334] "Generic (PLEG): container finished" podID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerID="aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6" exitCode=0 Nov 25 09:32:51 crc kubenswrapper[4734]: I1125 09:32:51.342961 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerDied","Data":"aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6"} Nov 25 09:32:59 crc kubenswrapper[4734]: I1125 09:32:59.729304 4734 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 25 09:33:04 crc 
kubenswrapper[4734]: I1125 09:33:04.300304 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k44hp"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.302051 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k44hp" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="registry-server" containerID="cri-o://db3149302f43d8b4948f4a71169e312969d6beda67e8cfadb3ae6bfc8eff027a" gracePeriod=30 Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.311484 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k7vf4"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.319040 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ct4kr"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.326770 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gdg5c"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.333213 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4qpf8"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.333437 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" containerID="cri-o://bc269dc7d1418fd711a7042d9d522d788c1204720e03766dd3ad056930c7b38e" gracePeriod=30 Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.339906 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9sdhr"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.351575 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jmrhz"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.356644 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rvq8p"] Nov 25 09:33:04 crc kubenswrapper[4734]: E1125 09:33:04.356898 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61b7ec5c-9240-4232-b303-6a4978e53beb" containerName="collect-profiles" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.356913 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="61b7ec5c-9240-4232-b303-6a4978e53beb" containerName="collect-profiles" Nov 25 09:33:04 crc kubenswrapper[4734]: E1125 09:33:04.356925 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc24dff-415c-4699-a097-7d1bb364ce56" containerName="pruner" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.356931 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc24dff-415c-4699-a097-7d1bb364ce56" containerName="pruner" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.357032 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc24dff-415c-4699-a097-7d1bb364ce56" containerName="pruner" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.357045 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="61b7ec5c-9240-4232-b303-6a4978e53beb" containerName="collect-profiles" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.357459 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.361863 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-md25m"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.365111 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nhd9g"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.368634 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rvq8p"] Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.466870 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.467235 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsvkl\" (UniqueName: \"kubernetes.io/projected/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-kube-api-access-qsvkl\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.467298 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.568170 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.568229 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsvkl\" (UniqueName: \"kubernetes.io/projected/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-kube-api-access-qsvkl\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.568260 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.569665 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-marketplace-trusted-ca\") pod 
\"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.574888 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.585061 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsvkl\" (UniqueName: \"kubernetes.io/projected/c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746-kube-api-access-qsvkl\") pod \"marketplace-operator-79b997595-rvq8p\" (UID: \"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746\") " pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:04 crc kubenswrapper[4734]: I1125 09:33:04.673267 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:05 crc kubenswrapper[4734]: I1125 09:33:05.177760 4734 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-4qpf8 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body= Nov 25 09:33:05 crc kubenswrapper[4734]: I1125 09:33:05.177828 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" Nov 25 09:33:05 crc kubenswrapper[4734]: I1125 09:33:05.546916 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-xtsd4"] Nov 25 09:33:06 crc kubenswrapper[4734]: I1125 09:33:06.434224 4734 generic.go:334] "Generic (PLEG): container finished" podID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerID="db3149302f43d8b4948f4a71169e312969d6beda67e8cfadb3ae6bfc8eff027a" exitCode=0 Nov 25 09:33:06 crc kubenswrapper[4734]: I1125 09:33:06.434311 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k44hp" event={"ID":"47c1c075-b2f5-4370-b399-2d0fdfc66d3e","Type":"ContainerDied","Data":"db3149302f43d8b4948f4a71169e312969d6beda67e8cfadb3ae6bfc8eff027a"} Nov 25 09:33:06 crc kubenswrapper[4734]: I1125 09:33:06.435842 4734 generic.go:334] "Generic (PLEG): container finished" podID="fb8016be-00f4-4c15-b17c-463a385be317" containerID="bc269dc7d1418fd711a7042d9d522d788c1204720e03766dd3ad056930c7b38e" exitCode=0 Nov 25 09:33:06 crc kubenswrapper[4734]: I1125 09:33:06.435878 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" event={"ID":"fb8016be-00f4-4c15-b17c-463a385be317","Type":"ContainerDied","Data":"bc269dc7d1418fd711a7042d9d522d788c1204720e03766dd3ad056930c7b38e"} Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.237180 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.241748 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.270851 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-operator-metrics\") pod \"fb8016be-00f4-4c15-b17c-463a385be317\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.270931 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-catalog-content\") pod \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.270971 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hglvt\" (UniqueName: \"kubernetes.io/projected/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-kube-api-access-hglvt\") pod \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.271008 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzpfh\" (UniqueName: \"kubernetes.io/projected/fb8016be-00f4-4c15-b17c-463a385be317-kube-api-access-rzpfh\") pod \"fb8016be-00f4-4c15-b17c-463a385be317\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.271050 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-trusted-ca\") pod \"fb8016be-00f4-4c15-b17c-463a385be317\" (UID: \"fb8016be-00f4-4c15-b17c-463a385be317\") " Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.271095 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-utilities\") pod \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\" (UID: \"47c1c075-b2f5-4370-b399-2d0fdfc66d3e\") " Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.277074 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-utilities" (OuterVolumeSpecName: "utilities") pod "47c1c075-b2f5-4370-b399-2d0fdfc66d3e" (UID: "47c1c075-b2f5-4370-b399-2d0fdfc66d3e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.278951 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "fb8016be-00f4-4c15-b17c-463a385be317" (UID: "fb8016be-00f4-4c15-b17c-463a385be317"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.284578 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "fb8016be-00f4-4c15-b17c-463a385be317" (UID: "fb8016be-00f4-4c15-b17c-463a385be317"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.284584 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb8016be-00f4-4c15-b17c-463a385be317-kube-api-access-rzpfh" (OuterVolumeSpecName: "kube-api-access-rzpfh") pod "fb8016be-00f4-4c15-b17c-463a385be317" (UID: "fb8016be-00f4-4c15-b17c-463a385be317"). InnerVolumeSpecName "kube-api-access-rzpfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.294894 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-kube-api-access-hglvt" (OuterVolumeSpecName: "kube-api-access-hglvt") pod "47c1c075-b2f5-4370-b399-2d0fdfc66d3e" (UID: "47c1c075-b2f5-4370-b399-2d0fdfc66d3e"). InnerVolumeSpecName "kube-api-access-hglvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.331638 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47c1c075-b2f5-4370-b399-2d0fdfc66d3e" (UID: "47c1c075-b2f5-4370-b399-2d0fdfc66d3e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.372384 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzpfh\" (UniqueName: \"kubernetes.io/projected/fb8016be-00f4-4c15-b17c-463a385be317-kube-api-access-rzpfh\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.372766 4734 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.372930 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.373059 4734 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8016be-00f4-4c15-b17c-463a385be317-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.373457 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.373943 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hglvt\" (UniqueName: \"kubernetes.io/projected/47c1c075-b2f5-4370-b399-2d0fdfc66d3e-kube-api-access-hglvt\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.462788 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k44hp" event={"ID":"47c1c075-b2f5-4370-b399-2d0fdfc66d3e","Type":"ContainerDied","Data":"6fe5d518b3ca00ac61b819d34dc6ff8929c4d84219ef8b5c14f2cbc55ea4eac3"} Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.462847 4734 scope.go:117] "RemoveContainer" containerID="db3149302f43d8b4948f4a71169e312969d6beda67e8cfadb3ae6bfc8eff027a" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.462967 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k44hp" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.471834 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" event={"ID":"fb8016be-00f4-4c15-b17c-463a385be317","Type":"ContainerDied","Data":"bf532dda9d29436337938b085108210b06a9478c859ec9f9deb374eee130b82a"} Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.471911 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-4qpf8" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.508258 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rvq8p"] Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.513681 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k44hp"] Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.520221 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k44hp"] Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.522399 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4qpf8"] Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.524890 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-4qpf8"] Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.535782 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rg94t"] Nov 25 09:33:11 crc kubenswrapper[4734]: E1125 09:33:11.536027 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="registry-server" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.536047 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="registry-server" Nov 25 09:33:11 crc kubenswrapper[4734]: E1125 09:33:11.536060 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.536066 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" Nov 25 09:33:11 crc kubenswrapper[4734]: E1125 09:33:11.536076 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="extract-content" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.536101 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="extract-content" Nov 25 09:33:11 crc kubenswrapper[4734]: E1125 09:33:11.536127 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="extract-utilities" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.536135 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="extract-utilities" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.536253 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb8016be-00f4-4c15-b17c-463a385be317" containerName="marketplace-operator" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.536270 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" containerName="registry-server" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.537489 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.547981 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rg94t"] Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.576827 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-catalog-content\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.577134 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9s8g\" (UniqueName: \"kubernetes.io/projected/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-kube-api-access-j9s8g\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.577164 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-utilities\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.677964 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-catalog-content\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.678467 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-catalog-content\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.678463 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9s8g\" (UniqueName: \"kubernetes.io/projected/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-kube-api-access-j9s8g\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.678600 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-utilities\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.679425 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-utilities\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.705621 4734 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j9s8g\" (UniqueName: \"kubernetes.io/projected/d0ceab4a-1500-4bc7-b5f6-25e89f57bce8-kube-api-access-j9s8g\") pod \"certified-operators-rg94t\" (UID: \"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8\") " pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:11 crc kubenswrapper[4734]: W1125 09:33:11.843871 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc4b3d5c9_6b58_4f1a_bfb2_c39d7a00c746.slice/crio-c2bc3927091c77076879ce804e7acef23ae5f496a89ce89202f493379e3f4173 WatchSource:0}: Error finding container c2bc3927091c77076879ce804e7acef23ae5f496a89ce89202f493379e3f4173: Status 404 returned error can't find the container with id c2bc3927091c77076879ce804e7acef23ae5f496a89ce89202f493379e3f4173 Nov 25 09:33:11 crc kubenswrapper[4734]: I1125 09:33:11.855872 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.072178 4734 scope.go:117] "RemoveContainer" containerID="d88f5d754d4d01f23ada11fef003012067333e3b51be414efa9ba3521f809c64" Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.148826 4734 scope.go:117] "RemoveContainer" containerID="143aa931fb884bfcf9fbb1d9908f9def036f98e0b36a2334015ee4dcf4cfad21" Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.183496 4734 scope.go:117] "RemoveContainer" containerID="bc269dc7d1418fd711a7042d9d522d788c1204720e03766dd3ad056930c7b38e" Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.257456 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47c1c075-b2f5-4370-b399-2d0fdfc66d3e" path="/var/lib/kubelet/pods/47c1c075-b2f5-4370-b399-2d0fdfc66d3e/volumes" Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.258212 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb8016be-00f4-4c15-b17c-463a385be317" path="/var/lib/kubelet/pods/fb8016be-00f4-4c15-b17c-463a385be317/volumes" Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.479059 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" event={"ID":"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746","Type":"ContainerStarted","Data":"c2bc3927091c77076879ce804e7acef23ae5f496a89ce89202f493379e3f4173"} Nov 25 09:33:12 crc kubenswrapper[4734]: I1125 09:33:12.640129 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rg94t"] Nov 25 09:33:12 crc kubenswrapper[4734]: W1125 09:33:12.642782 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0ceab4a_1500_4bc7_b5f6_25e89f57bce8.slice/crio-840aa34a8b9f7f0f05f48870bc3a49f209fe66a305e2a710a1dbc28993ed7f31 WatchSource:0}: Error finding container 840aa34a8b9f7f0f05f48870bc3a49f209fe66a305e2a710a1dbc28993ed7f31: Status 404 returned error can't find the container with id 840aa34a8b9f7f0f05f48870bc3a49f209fe66a305e2a710a1dbc28993ed7f31 Nov 25 09:33:13 crc kubenswrapper[4734]: I1125 09:33:13.489637 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ct4kr" event={"ID":"8e35bc80-2357-4dd8-85f0-2a88916f13a8","Type":"ContainerStarted","Data":"a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6"} Nov 25 09:33:13 crc kubenswrapper[4734]: I1125 09:33:13.492159 4734 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/certified-operators-rg94t" event={"ID":"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8","Type":"ContainerStarted","Data":"840aa34a8b9f7f0f05f48870bc3a49f209fe66a305e2a710a1dbc28993ed7f31"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.501007 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" event={"ID":"c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746","Type":"ContainerStarted","Data":"3220e18f17806f82c4b46a0d6285ae803eeef5e1beda07855dd6e8c4c0cb65e5"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.501373 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.502944 4734 generic.go:334] "Generic (PLEG): container finished" podID="0cfee19b-29a3-4816-b501-e2420d36b371" containerID="3ff761927b8a016a2da973c7ea931eabc0210601174ecd999943f102231105ee" exitCode=0 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.502978 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9sdhr" event={"ID":"0cfee19b-29a3-4816-b501-e2420d36b371","Type":"ContainerDied","Data":"3ff761927b8a016a2da973c7ea931eabc0210601174ecd999943f102231105ee"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.505975 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.508366 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerStarted","Data":"c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.508592 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k7vf4" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="registry-server" containerID="cri-o://c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1" gracePeriod=30 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.512106 4734 generic.go:334] "Generic (PLEG): container finished" podID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerID="e2764a3c436f1bed8e0a1ffbc9d440de0d6ac9209a8537ab701359c88ced0523" exitCode=0 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.512193 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nhd9g" event={"ID":"de135379-0cb7-4ec3-a16a-43e7856a7b7f","Type":"ContainerDied","Data":"e2764a3c436f1bed8e0a1ffbc9d440de0d6ac9209a8537ab701359c88ced0523"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.514816 4734 generic.go:334] "Generic (PLEG): container finished" podID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerID="022d3f228fc6444d3eb44edc08ebb208d691593a1c0ffa495378db70ce503e48" exitCode=0 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.514866 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdg5c" event={"ID":"64417a45-ee86-4f32-8f68-7d17d9e493cc","Type":"ContainerDied","Data":"022d3f228fc6444d3eb44edc08ebb208d691593a1c0ffa495378db70ce503e48"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.517700 4734 generic.go:334] "Generic (PLEG): container finished" podID="d0ceab4a-1500-4bc7-b5f6-25e89f57bce8" 
containerID="2ef5d8c663da7101abfddc0a1ebcb62a86907d7097305797a9ff71dac35c4105" exitCode=0 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.517771 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rg94t" event={"ID":"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8","Type":"ContainerDied","Data":"2ef5d8c663da7101abfddc0a1ebcb62a86907d7097305797a9ff71dac35c4105"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.526829 4734 generic.go:334] "Generic (PLEG): container finished" podID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerID="5d248b85a69b83951e7988d26a2e802ed3c81fd0ec60b23bcfd1e3528caf8de9" exitCode=0 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.526960 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md25m" event={"ID":"09b0af8a-c69a-4ff7-9836-7f6e349c48a4","Type":"ContainerDied","Data":"5d248b85a69b83951e7988d26a2e802ed3c81fd0ec60b23bcfd1e3528caf8de9"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.530027 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" podStartSLOduration=10.530016394 podStartE2EDuration="10.530016394s" podCreationTimestamp="2025-11-25 09:33:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:33:14.527611563 +0000 UTC m=+317.338073557" watchObservedRunningTime="2025-11-25 09:33:14.530016394 +0000 UTC m=+317.340478388" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.554620 4734 generic.go:334] "Generic (PLEG): container finished" podID="9925f447-5834-4ade-bda0-61901cd4af1a" containerID="a82da392f14bcf0cfdc792aca3434d874cfd495beac41152467c3930fd70367b" exitCode=0 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.554816 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ct4kr" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="registry-server" containerID="cri-o://a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6" gracePeriod=30 Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.554800 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jmrhz" event={"ID":"9925f447-5834-4ade-bda0-61901cd4af1a","Type":"ContainerDied","Data":"a82da392f14bcf0cfdc792aca3434d874cfd495beac41152467c3930fd70367b"} Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.609180 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k7vf4" podStartSLOduration=5.958534709 podStartE2EDuration="2m31.609159897s" podCreationTimestamp="2025-11-25 09:30:43 +0000 UTC" firstStartedPulling="2025-11-25 09:30:46.435276326 +0000 UTC m=+169.245738320" lastFinishedPulling="2025-11-25 09:33:12.085901514 +0000 UTC m=+314.896363508" observedRunningTime="2025-11-25 09:33:14.607605312 +0000 UTC m=+317.418067316" watchObservedRunningTime="2025-11-25 09:33:14.609159897 +0000 UTC m=+317.419621891" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.752793 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ct4kr" podStartSLOduration=6.047804676 podStartE2EDuration="2m31.752770714s" podCreationTimestamp="2025-11-25 09:30:43 +0000 UTC" firstStartedPulling="2025-11-25 09:30:46.367229673 +0000 UTC m=+169.177691677" 
lastFinishedPulling="2025-11-25 09:33:12.072195721 +0000 UTC m=+314.882657715" observedRunningTime="2025-11-25 09:33:14.750705363 +0000 UTC m=+317.561167357" watchObservedRunningTime="2025-11-25 09:33:14.752770714 +0000 UTC m=+317.563232708" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.840302 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.947429 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.947762 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nhd9g" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.948051 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.952587 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.957096 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-utilities\") pod \"0cfee19b-29a3-4816-b501-e2420d36b371\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.957151 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qds45\" (UniqueName: \"kubernetes.io/projected/0cfee19b-29a3-4816-b501-e2420d36b371-kube-api-access-qds45\") pod \"0cfee19b-29a3-4816-b501-e2420d36b371\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.957192 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-catalog-content\") pod \"0cfee19b-29a3-4816-b501-e2420d36b371\" (UID: \"0cfee19b-29a3-4816-b501-e2420d36b371\") " Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.958123 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-utilities" (OuterVolumeSpecName: "utilities") pod "0cfee19b-29a3-4816-b501-e2420d36b371" (UID: "0cfee19b-29a3-4816-b501-e2420d36b371"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.966981 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cfee19b-29a3-4816-b501-e2420d36b371-kube-api-access-qds45" (OuterVolumeSpecName: "kube-api-access-qds45") pod "0cfee19b-29a3-4816-b501-e2420d36b371" (UID: "0cfee19b-29a3-4816-b501-e2420d36b371"). InnerVolumeSpecName "kube-api-access-qds45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:14 crc kubenswrapper[4734]: I1125 09:33:14.989391 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0cfee19b-29a3-4816-b501-e2420d36b371" (UID: "0cfee19b-29a3-4816-b501-e2420d36b371"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.014845 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-k7vf4_accddc60-d025-4bf2-b7f4-fc7cfbc624ed/registry-server/0.log" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.016495 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058161 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-catalog-content\") pod \"9925f447-5834-4ade-bda0-61901cd4af1a\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058223 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-utilities\") pod \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058257 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dvzv\" (UniqueName: \"kubernetes.io/projected/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-kube-api-access-5dvzv\") pod \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058284 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sdc2g\" (UniqueName: \"kubernetes.io/projected/64417a45-ee86-4f32-8f68-7d17d9e493cc-kube-api-access-sdc2g\") pod \"64417a45-ee86-4f32-8f68-7d17d9e493cc\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058309 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ssz7\" (UniqueName: \"kubernetes.io/projected/9925f447-5834-4ade-bda0-61901cd4af1a-kube-api-access-4ssz7\") pod \"9925f447-5834-4ade-bda0-61901cd4af1a\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058348 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-catalog-content\") pod \"64417a45-ee86-4f32-8f68-7d17d9e493cc\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058371 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-utilities\") pod \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058400 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-utilities\") pod \"9925f447-5834-4ade-bda0-61901cd4af1a\" (UID: \"9925f447-5834-4ade-bda0-61901cd4af1a\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058784 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-catalog-content\") pod \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058840 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-utilities\") pod \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058867 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgm6p\" (UniqueName: \"kubernetes.io/projected/de135379-0cb7-4ec3-a16a-43e7856a7b7f-kube-api-access-hgm6p\") pod \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\" (UID: \"de135379-0cb7-4ec3-a16a-43e7856a7b7f\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058902 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2n5k\" (UniqueName: \"kubernetes.io/projected/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-kube-api-access-x2n5k\") pod \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058927 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-utilities" (OuterVolumeSpecName: "utilities") pod "de135379-0cb7-4ec3-a16a-43e7856a7b7f" (UID: "de135379-0cb7-4ec3-a16a-43e7856a7b7f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.058934 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-catalog-content\") pod \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\" (UID: \"09b0af8a-c69a-4ff7-9836-7f6e349c48a4\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.059053 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-utilities\") pod \"64417a45-ee86-4f32-8f68-7d17d9e493cc\" (UID: \"64417a45-ee86-4f32-8f68-7d17d9e493cc\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.059220 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.059262 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.059274 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cfee19b-29a3-4816-b501-e2420d36b371-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.059284 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qds45\" (UniqueName: \"kubernetes.io/projected/0cfee19b-29a3-4816-b501-e2420d36b371-kube-api-access-qds45\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.061022 4734 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-utilities" (OuterVolumeSpecName: "utilities") pod "9925f447-5834-4ade-bda0-61901cd4af1a" (UID: "9925f447-5834-4ade-bda0-61901cd4af1a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.061142 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-utilities" (OuterVolumeSpecName: "utilities") pod "accddc60-d025-4bf2-b7f4-fc7cfbc624ed" (UID: "accddc60-d025-4bf2-b7f4-fc7cfbc624ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.061195 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-utilities" (OuterVolumeSpecName: "utilities") pod "09b0af8a-c69a-4ff7-9836-7f6e349c48a4" (UID: "09b0af8a-c69a-4ff7-9836-7f6e349c48a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.068989 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64417a45-ee86-4f32-8f68-7d17d9e493cc-kube-api-access-sdc2g" (OuterVolumeSpecName: "kube-api-access-sdc2g") pod "64417a45-ee86-4f32-8f68-7d17d9e493cc" (UID: "64417a45-ee86-4f32-8f68-7d17d9e493cc"). InnerVolumeSpecName "kube-api-access-sdc2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.069028 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9925f447-5834-4ade-bda0-61901cd4af1a-kube-api-access-4ssz7" (OuterVolumeSpecName: "kube-api-access-4ssz7") pod "9925f447-5834-4ade-bda0-61901cd4af1a" (UID: "9925f447-5834-4ade-bda0-61901cd4af1a"). InnerVolumeSpecName "kube-api-access-4ssz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.069045 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-kube-api-access-x2n5k" (OuterVolumeSpecName: "kube-api-access-x2n5k") pod "accddc60-d025-4bf2-b7f4-fc7cfbc624ed" (UID: "accddc60-d025-4bf2-b7f4-fc7cfbc624ed"). InnerVolumeSpecName "kube-api-access-x2n5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.069102 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de135379-0cb7-4ec3-a16a-43e7856a7b7f-kube-api-access-hgm6p" (OuterVolumeSpecName: "kube-api-access-hgm6p") pod "de135379-0cb7-4ec3-a16a-43e7856a7b7f" (UID: "de135379-0cb7-4ec3-a16a-43e7856a7b7f"). InnerVolumeSpecName "kube-api-access-hgm6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.069109 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-kube-api-access-5dvzv" (OuterVolumeSpecName: "kube-api-access-5dvzv") pod "09b0af8a-c69a-4ff7-9836-7f6e349c48a4" (UID: "09b0af8a-c69a-4ff7-9836-7f6e349c48a4"). InnerVolumeSpecName "kube-api-access-5dvzv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.075914 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-utilities" (OuterVolumeSpecName: "utilities") pod "64417a45-ee86-4f32-8f68-7d17d9e493cc" (UID: "64417a45-ee86-4f32-8f68-7d17d9e493cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.078297 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ct4kr_8e35bc80-2357-4dd8-85f0-2a88916f13a8/registry-server/0.log" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.078930 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.082487 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9925f447-5834-4ade-bda0-61901cd4af1a" (UID: "9925f447-5834-4ade-bda0-61901cd4af1a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.125705 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64417a45-ee86-4f32-8f68-7d17d9e493cc" (UID: "64417a45-ee86-4f32-8f68-7d17d9e493cc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160268 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-catalog-content\") pod \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\" (UID: \"accddc60-d025-4bf2-b7f4-fc7cfbc624ed\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160326 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r56nq\" (UniqueName: \"kubernetes.io/projected/8e35bc80-2357-4dd8-85f0-2a88916f13a8-kube-api-access-r56nq\") pod \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160377 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-catalog-content\") pod \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160458 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-utilities\") pod \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\" (UID: \"8e35bc80-2357-4dd8-85f0-2a88916f13a8\") " Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160787 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2n5k\" (UniqueName: \"kubernetes.io/projected/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-kube-api-access-x2n5k\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: 
I1125 09:33:15.160804 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160817 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160828 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dvzv\" (UniqueName: \"kubernetes.io/projected/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-kube-api-access-5dvzv\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160841 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sdc2g\" (UniqueName: \"kubernetes.io/projected/64417a45-ee86-4f32-8f68-7d17d9e493cc-kube-api-access-sdc2g\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160852 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ssz7\" (UniqueName: \"kubernetes.io/projected/9925f447-5834-4ade-bda0-61901cd4af1a-kube-api-access-4ssz7\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160863 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64417a45-ee86-4f32-8f68-7d17d9e493cc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160874 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160885 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9925f447-5834-4ade-bda0-61901cd4af1a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160896 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.160907 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgm6p\" (UniqueName: \"kubernetes.io/projected/de135379-0cb7-4ec3-a16a-43e7856a7b7f-kube-api-access-hgm6p\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.161662 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-utilities" (OuterVolumeSpecName: "utilities") pod "8e35bc80-2357-4dd8-85f0-2a88916f13a8" (UID: "8e35bc80-2357-4dd8-85f0-2a88916f13a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.163813 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e35bc80-2357-4dd8-85f0-2a88916f13a8-kube-api-access-r56nq" (OuterVolumeSpecName: "kube-api-access-r56nq") pod "8e35bc80-2357-4dd8-85f0-2a88916f13a8" (UID: "8e35bc80-2357-4dd8-85f0-2a88916f13a8"). InnerVolumeSpecName "kube-api-access-r56nq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.189603 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de135379-0cb7-4ec3-a16a-43e7856a7b7f" (UID: "de135379-0cb7-4ec3-a16a-43e7856a7b7f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.192248 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09b0af8a-c69a-4ff7-9836-7f6e349c48a4" (UID: "09b0af8a-c69a-4ff7-9836-7f6e349c48a4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.210040 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "accddc60-d025-4bf2-b7f4-fc7cfbc624ed" (UID: "accddc60-d025-4bf2-b7f4-fc7cfbc624ed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.214746 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e35bc80-2357-4dd8-85f0-2a88916f13a8" (UID: "8e35bc80-2357-4dd8-85f0-2a88916f13a8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.261745 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09b0af8a-c69a-4ff7-9836-7f6e349c48a4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.261778 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/accddc60-d025-4bf2-b7f4-fc7cfbc624ed-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.261789 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r56nq\" (UniqueName: \"kubernetes.io/projected/8e35bc80-2357-4dd8-85f0-2a88916f13a8-kube-api-access-r56nq\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.261821 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.261833 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e35bc80-2357-4dd8-85f0-2a88916f13a8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.261842 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de135379-0cb7-4ec3-a16a-43e7856a7b7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.564583 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdg5c" event={"ID":"64417a45-ee86-4f32-8f68-7d17d9e493cc","Type":"ContainerDied","Data":"8d4bfc69c74eec894d930e7fff7a1a75049e81d40e229bbb2f9b48a9d83a245a"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.564623 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdg5c" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.565012 4734 scope.go:117] "RemoveContainer" containerID="022d3f228fc6444d3eb44edc08ebb208d691593a1c0ffa495378db70ce503e48" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.568869 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-ct4kr_8e35bc80-2357-4dd8-85f0-2a88916f13a8/registry-server/0.log" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.569729 4734 generic.go:334] "Generic (PLEG): container finished" podID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerID="a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6" exitCode=1 Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.569953 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ct4kr" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.569955 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ct4kr" event={"ID":"8e35bc80-2357-4dd8-85f0-2a88916f13a8","Type":"ContainerDied","Data":"a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.570119 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ct4kr" event={"ID":"8e35bc80-2357-4dd8-85f0-2a88916f13a8","Type":"ContainerDied","Data":"5fbe1d3e31833c30735e46d9f573994316eee9b4476fcdea9f615f1901415370"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.572929 4734 generic.go:334] "Generic (PLEG): container finished" podID="d0ceab4a-1500-4bc7-b5f6-25e89f57bce8" containerID="c049de7340f03efd0716a1da795d4a970cee616232b19b8ba5c6d9fe6f9ac6cd" exitCode=0 Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.573075 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rg94t" event={"ID":"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8","Type":"ContainerDied","Data":"c049de7340f03efd0716a1da795d4a970cee616232b19b8ba5c6d9fe6f9ac6cd"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.576388 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-md25m" event={"ID":"09b0af8a-c69a-4ff7-9836-7f6e349c48a4","Type":"ContainerDied","Data":"137bf0c20304a8c2f7b41c3da031bf7328d7943b1c1111a3a6529c77350e6922"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.576424 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-md25m" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.581457 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jmrhz" event={"ID":"9925f447-5834-4ade-bda0-61901cd4af1a","Type":"ContainerDied","Data":"097009b8ba493199315b15b82d802724cd72143a2a67a6cf54a0b5eb3928f5a5"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.581525 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jmrhz" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.583837 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9sdhr" event={"ID":"0cfee19b-29a3-4816-b501-e2420d36b371","Type":"ContainerDied","Data":"f1d9f6dc6204dce607e01342e48f82c22f389d24dc0842ecd66ce123bac4be1d"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.583891 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9sdhr" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.586296 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-k7vf4_accddc60-d025-4bf2-b7f4-fc7cfbc624ed/registry-server/0.log" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.587164 4734 generic.go:334] "Generic (PLEG): container finished" podID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerID="c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1" exitCode=1 Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.587247 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k7vf4" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.587249 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerDied","Data":"c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.587322 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k7vf4" event={"ID":"accddc60-d025-4bf2-b7f4-fc7cfbc624ed","Type":"ContainerDied","Data":"5e3dd45b668c1664849d79e548f09d29e41e84343bb8988843ab62bcc361969c"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.589804 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nhd9g" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.589856 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nhd9g" event={"ID":"de135379-0cb7-4ec3-a16a-43e7856a7b7f","Type":"ContainerDied","Data":"a627e2fde580910306091fdaee4810580416d2d4cd47ff9a08c4de9ef5c84a94"} Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.602483 4734 scope.go:117] "RemoveContainer" containerID="391a89151ababc98f6f3d4f2db4752d8791b779aba6a94d1571f5cea3a1caf6c" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.638529 4734 scope.go:117] "RemoveContainer" containerID="a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.656557 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gdg5c"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.658634 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gdg5c"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.669303 4734 scope.go:117] "RemoveContainer" containerID="e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.688109 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jmrhz"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.693621 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jmrhz"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.725989 4734 scope.go:117] "RemoveContainer" containerID="f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.726050 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-md25m"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.730066 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-md25m"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.749361 4734 scope.go:117] "RemoveContainer" containerID="a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6" Nov 25 09:33:15 crc kubenswrapper[4734]: E1125 09:33:15.753994 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6\": container with ID starting with a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6 not found: ID does not exist" 
containerID="a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.754047 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6"} err="failed to get container status \"a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6\": rpc error: code = NotFound desc = could not find container \"a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6\": container with ID starting with a4b4d85cb1458e48efabce52a343787f13c4c4079d58ea92b7bd8cab2046ebe6 not found: ID does not exist" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.754096 4734 scope.go:117] "RemoveContainer" containerID="e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a" Nov 25 09:33:15 crc kubenswrapper[4734]: E1125 09:33:15.754568 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a\": container with ID starting with e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a not found: ID does not exist" containerID="e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.754590 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a"} err="failed to get container status \"e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a\": rpc error: code = NotFound desc = could not find container \"e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a\": container with ID starting with e5a56d27e8c0bf6136503d04beba60bbf79b6ae0a40a00efbba4fdc4e30a0c6a not found: ID does not exist" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.754609 4734 scope.go:117] "RemoveContainer" containerID="f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be" Nov 25 09:33:15 crc kubenswrapper[4734]: E1125 09:33:15.754957 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be\": container with ID starting with f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be not found: ID does not exist" containerID="f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.754992 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be"} err="failed to get container status \"f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be\": rpc error: code = NotFound desc = could not find container \"f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be\": container with ID starting with f295ae8f652428e993d0d11bf7b789078ef2e05f171e18319e21cf45363321be not found: ID does not exist" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.755021 4734 scope.go:117] "RemoveContainer" containerID="5d248b85a69b83951e7988d26a2e802ed3c81fd0ec60b23bcfd1e3528caf8de9" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.760829 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9sdhr"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 
09:33:15.769605 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9sdhr"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.774375 4734 scope.go:117] "RemoveContainer" containerID="cfcfec3addc5fa6fd2e9a2eb7b9ec13f858730cd082c295bfd2f2f2f4da6cbe0" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.782230 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k7vf4"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.787714 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k7vf4"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.806251 4734 scope.go:117] "RemoveContainer" containerID="a82da392f14bcf0cfdc792aca3434d874cfd495beac41152467c3930fd70367b" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.823430 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nhd9g"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.827716 4734 scope.go:117] "RemoveContainer" containerID="214347f6cbc8dcc6362822b94db806942b7de73b6079fe4500576684b6ee291b" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.832347 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nhd9g"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.836913 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ct4kr"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.853158 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ct4kr"] Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.856980 4734 scope.go:117] "RemoveContainer" containerID="3ff761927b8a016a2da973c7ea931eabc0210601174ecd999943f102231105ee" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.870810 4734 scope.go:117] "RemoveContainer" containerID="892705774a83cb33cac3474472ee1630bdb3247b09ff1653470e2a748545c4af" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.885767 4734 scope.go:117] "RemoveContainer" containerID="c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.901459 4734 scope.go:117] "RemoveContainer" containerID="aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.920146 4734 scope.go:117] "RemoveContainer" containerID="6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.936442 4734 scope.go:117] "RemoveContainer" containerID="c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1" Nov 25 09:33:15 crc kubenswrapper[4734]: E1125 09:33:15.936983 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1\": container with ID starting with c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1 not found: ID does not exist" containerID="c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.937062 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1"} err="failed to get container status 
\"c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1\": rpc error: code = NotFound desc = could not find container \"c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1\": container with ID starting with c44ecd43474607cef26f731d011393f052d24edc34c1b6e23abbcbc98628c9b1 not found: ID does not exist" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.937173 4734 scope.go:117] "RemoveContainer" containerID="aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6" Nov 25 09:33:15 crc kubenswrapper[4734]: E1125 09:33:15.937746 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6\": container with ID starting with aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6 not found: ID does not exist" containerID="aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.937775 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6"} err="failed to get container status \"aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6\": rpc error: code = NotFound desc = could not find container \"aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6\": container with ID starting with aaa7fc547d0db92bde38c8cee4b91fb14b88f60ed14a7b718c4120d6dbc2d4e6 not found: ID does not exist" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.937791 4734 scope.go:117] "RemoveContainer" containerID="6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773" Nov 25 09:33:15 crc kubenswrapper[4734]: E1125 09:33:15.938049 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773\": container with ID starting with 6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773 not found: ID does not exist" containerID="6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.938163 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773"} err="failed to get container status \"6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773\": rpc error: code = NotFound desc = could not find container \"6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773\": container with ID starting with 6727a66b30ac8e6af4727aaccf18c83930ade785ca9d56f1c19ad2306319d773 not found: ID does not exist" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.938187 4734 scope.go:117] "RemoveContainer" containerID="e2764a3c436f1bed8e0a1ffbc9d440de0d6ac9209a8537ab701359c88ced0523" Nov 25 09:33:15 crc kubenswrapper[4734]: I1125 09:33:15.954469 4734 scope.go:117] "RemoveContainer" containerID="8338b713b5d35ad7b02d28212dc20de4d60fddcb73933b13a22eec5fa0f3dcec" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.254864 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" path="/var/lib/kubelet/pods/09b0af8a-c69a-4ff7-9836-7f6e349c48a4/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.255556 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="0cfee19b-29a3-4816-b501-e2420d36b371" path="/var/lib/kubelet/pods/0cfee19b-29a3-4816-b501-e2420d36b371/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.256360 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc" path="/var/lib/kubelet/pods/64417a45-ee86-4f32-8f68-7d17d9e493cc/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.256992 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" path="/var/lib/kubelet/pods/8e35bc80-2357-4dd8-85f0-2a88916f13a8/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.257952 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9925f447-5834-4ade-bda0-61901cd4af1a" path="/var/lib/kubelet/pods/9925f447-5834-4ade-bda0-61901cd4af1a/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.258575 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" path="/var/lib/kubelet/pods/accddc60-d025-4bf2-b7f4-fc7cfbc624ed/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.259196 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" path="/var/lib/kubelet/pods/de135379-0cb7-4ec3-a16a-43e7856a7b7f/volumes" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.602865 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rg94t" event={"ID":"d0ceab4a-1500-4bc7-b5f6-25e89f57bce8","Type":"ContainerStarted","Data":"a372842853113e38e183db4742e12d207f7eda1508413d7f5731612d5b4cc090"} Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.624340 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rg94t" podStartSLOduration=4.159343831 podStartE2EDuration="5.624318012s" podCreationTimestamp="2025-11-25 09:33:11 +0000 UTC" firstStartedPulling="2025-11-25 09:33:14.519804834 +0000 UTC m=+317.330266828" lastFinishedPulling="2025-11-25 09:33:15.984779025 +0000 UTC m=+318.795241009" observedRunningTime="2025-11-25 09:33:16.622280162 +0000 UTC m=+319.432742166" watchObservedRunningTime="2025-11-25 09:33:16.624318012 +0000 UTC m=+319.434780006" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676546 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q4n48"] Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676789 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676804 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676818 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676824 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676832 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 
09:33:16.676840 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676849 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cfee19b-29a3-4816-b501-e2420d36b371" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676855 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cfee19b-29a3-4816-b501-e2420d36b371" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676862 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676868 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676878 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9925f447-5834-4ade-bda0-61901cd4af1a" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676884 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="9925f447-5834-4ade-bda0-61901cd4af1a" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676891 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676899 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676907 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676913 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676923 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cfee19b-29a3-4816-b501-e2420d36b371" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676929 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cfee19b-29a3-4816-b501-e2420d36b371" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676936 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676941 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676948 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="registry-server" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676954 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="registry-server" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676962 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="registry-server" Nov 25 09:33:16 
crc kubenswrapper[4734]: I1125 09:33:16.676968 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="registry-server" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676975 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676981 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.676988 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.676994 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.677001 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9925f447-5834-4ade-bda0-61901cd4af1a" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677006 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="9925f447-5834-4ade-bda0-61901cd4af1a" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: E1125 09:33:16.677014 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677020 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerName="extract-utilities" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677129 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="64417a45-ee86-4f32-8f68-7d17d9e493cc" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677139 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="accddc60-d025-4bf2-b7f4-fc7cfbc624ed" containerName="registry-server" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677145 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="9925f447-5834-4ade-bda0-61901cd4af1a" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677155 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e35bc80-2357-4dd8-85f0-2a88916f13a8" containerName="registry-server" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677162 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="09b0af8a-c69a-4ff7-9836-7f6e349c48a4" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677168 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="0cfee19b-29a3-4816-b501-e2420d36b371" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677177 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="de135379-0cb7-4ec3-a16a-43e7856a7b7f" containerName="extract-content" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.677836 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.680155 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.691485 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q4n48"] Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.781236 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmw82\" (UniqueName: \"kubernetes.io/projected/66ce08e8-5d8b-41bf-b896-8b1c1336299d-kube-api-access-xmw82\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.781454 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ce08e8-5d8b-41bf-b896-8b1c1336299d-utilities\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.781541 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ce08e8-5d8b-41bf-b896-8b1c1336299d-catalog-content\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.877250 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fxzr8"] Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.878198 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.880465 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.882655 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmw82\" (UniqueName: \"kubernetes.io/projected/66ce08e8-5d8b-41bf-b896-8b1c1336299d-kube-api-access-xmw82\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.882722 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ce08e8-5d8b-41bf-b896-8b1c1336299d-utilities\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.882755 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ce08e8-5d8b-41bf-b896-8b1c1336299d-catalog-content\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.883316 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66ce08e8-5d8b-41bf-b896-8b1c1336299d-utilities\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.883376 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66ce08e8-5d8b-41bf-b896-8b1c1336299d-catalog-content\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.905480 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fxzr8"] Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.908061 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmw82\" (UniqueName: \"kubernetes.io/projected/66ce08e8-5d8b-41bf-b896-8b1c1336299d-kube-api-access-xmw82\") pod \"community-operators-q4n48\" (UID: \"66ce08e8-5d8b-41bf-b896-8b1c1336299d\") " pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.983980 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27389c9-6b13-421e-adfc-0d290d42974a-catalog-content\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.984035 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9wmx\" (UniqueName: \"kubernetes.io/projected/b27389c9-6b13-421e-adfc-0d290d42974a-kube-api-access-g9wmx\") pod \"redhat-marketplace-fxzr8\" (UID: 
\"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:16 crc kubenswrapper[4734]: I1125 09:33:16.984097 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27389c9-6b13-421e-adfc-0d290d42974a-utilities\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.023341 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.085667 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27389c9-6b13-421e-adfc-0d290d42974a-catalog-content\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.085719 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9wmx\" (UniqueName: \"kubernetes.io/projected/b27389c9-6b13-421e-adfc-0d290d42974a-kube-api-access-g9wmx\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.085748 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27389c9-6b13-421e-adfc-0d290d42974a-utilities\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.086311 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27389c9-6b13-421e-adfc-0d290d42974a-utilities\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.086313 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27389c9-6b13-421e-adfc-0d290d42974a-catalog-content\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.112842 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9wmx\" (UniqueName: \"kubernetes.io/projected/b27389c9-6b13-421e-adfc-0d290d42974a-kube-api-access-g9wmx\") pod \"redhat-marketplace-fxzr8\" (UID: \"b27389c9-6b13-421e-adfc-0d290d42974a\") " pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.193980 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fxzr8" Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.245180 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q4n48"] Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.415285 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fxzr8"] Nov 25 09:33:17 crc kubenswrapper[4734]: W1125 09:33:17.416745 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb27389c9_6b13_421e_adfc_0d290d42974a.slice/crio-fd60cb7473e6fef5f8e3bf0fe5f6f5c80ae269148d6787c8e9851fbe6b9421e5 WatchSource:0}: Error finding container fd60cb7473e6fef5f8e3bf0fe5f6f5c80ae269148d6787c8e9851fbe6b9421e5: Status 404 returned error can't find the container with id fd60cb7473e6fef5f8e3bf0fe5f6f5c80ae269148d6787c8e9851fbe6b9421e5 Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.613248 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fxzr8" event={"ID":"b27389c9-6b13-421e-adfc-0d290d42974a","Type":"ContainerStarted","Data":"fd60cb7473e6fef5f8e3bf0fe5f6f5c80ae269148d6787c8e9851fbe6b9421e5"} Nov 25 09:33:17 crc kubenswrapper[4734]: I1125 09:33:17.614159 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q4n48" event={"ID":"66ce08e8-5d8b-41bf-b896-8b1c1336299d","Type":"ContainerStarted","Data":"c1c8ebc004033f32a7b754d9c7509590ac207db3613f54932260ecbd61b07162"} Nov 25 09:33:18 crc kubenswrapper[4734]: I1125 09:33:18.621340 4734 generic.go:334] "Generic (PLEG): container finished" podID="b27389c9-6b13-421e-adfc-0d290d42974a" containerID="3e83b2f594b44a57294f54e55d17e9231cb6840af95aaf4eca646bcd1fe29c09" exitCode=0 Nov 25 09:33:18 crc kubenswrapper[4734]: I1125 09:33:18.621432 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fxzr8" event={"ID":"b27389c9-6b13-421e-adfc-0d290d42974a","Type":"ContainerDied","Data":"3e83b2f594b44a57294f54e55d17e9231cb6840af95aaf4eca646bcd1fe29c09"} Nov 25 09:33:18 crc kubenswrapper[4734]: I1125 09:33:18.627640 4734 generic.go:334] "Generic (PLEG): container finished" podID="66ce08e8-5d8b-41bf-b896-8b1c1336299d" containerID="2f3eb2178198013778f726e95a9e8b48d3122bb4047a64e962b02f3be5d8ac91" exitCode=0 Nov 25 09:33:18 crc kubenswrapper[4734]: I1125 09:33:18.627678 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q4n48" event={"ID":"66ce08e8-5d8b-41bf-b896-8b1c1336299d","Type":"ContainerDied","Data":"2f3eb2178198013778f726e95a9e8b48d3122bb4047a64e962b02f3be5d8ac91"} Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.285691 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4lj7p"] Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.288429 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.294406 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.299631 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4lj7p"] Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.315121 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473a37db-4499-4761-9a44-137ceb74dec4-catalog-content\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.315179 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mtp7\" (UniqueName: \"kubernetes.io/projected/473a37db-4499-4761-9a44-137ceb74dec4-kube-api-access-2mtp7\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.315212 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473a37db-4499-4761-9a44-137ceb74dec4-utilities\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.415902 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mtp7\" (UniqueName: \"kubernetes.io/projected/473a37db-4499-4761-9a44-137ceb74dec4-kube-api-access-2mtp7\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.416789 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473a37db-4499-4761-9a44-137ceb74dec4-utilities\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.417030 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473a37db-4499-4761-9a44-137ceb74dec4-catalog-content\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.417317 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/473a37db-4499-4761-9a44-137ceb74dec4-utilities\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.417578 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/473a37db-4499-4761-9a44-137ceb74dec4-catalog-content\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " 
pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.436179 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mtp7\" (UniqueName: \"kubernetes.io/projected/473a37db-4499-4761-9a44-137ceb74dec4-kube-api-access-2mtp7\") pod \"redhat-operators-4lj7p\" (UID: \"473a37db-4499-4761-9a44-137ceb74dec4\") " pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.614551 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:33:19 crc kubenswrapper[4734]: I1125 09:33:19.826504 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4lj7p"] Nov 25 09:33:20 crc kubenswrapper[4734]: I1125 09:33:20.641132 4734 generic.go:334] "Generic (PLEG): container finished" podID="473a37db-4499-4761-9a44-137ceb74dec4" containerID="a44c3173bc229644ae384e3d88eb0703860e1699f466a02795a960c6874a9e1a" exitCode=0 Nov 25 09:33:20 crc kubenswrapper[4734]: I1125 09:33:20.641241 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4lj7p" event={"ID":"473a37db-4499-4761-9a44-137ceb74dec4","Type":"ContainerDied","Data":"a44c3173bc229644ae384e3d88eb0703860e1699f466a02795a960c6874a9e1a"} Nov 25 09:33:20 crc kubenswrapper[4734]: I1125 09:33:20.641411 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4lj7p" event={"ID":"473a37db-4499-4761-9a44-137ceb74dec4","Type":"ContainerStarted","Data":"9dba6621a65942d2a0c66d496fb623c7e8ff835abd6184ab7a863340f35d4af0"} Nov 25 09:33:21 crc kubenswrapper[4734]: I1125 09:33:21.648455 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q4n48" event={"ID":"66ce08e8-5d8b-41bf-b896-8b1c1336299d","Type":"ContainerStarted","Data":"4d7aade17a10d5b305be119613a91b774d9f38bd045ea8e764613567d6ecbde4"} Nov 25 09:33:21 crc kubenswrapper[4734]: I1125 09:33:21.650101 4734 generic.go:334] "Generic (PLEG): container finished" podID="b27389c9-6b13-421e-adfc-0d290d42974a" containerID="9016caaf175c7fa024bfd42e641e8cb809d066e6f6e6044d5437049fb03be0b2" exitCode=0 Nov 25 09:33:21 crc kubenswrapper[4734]: I1125 09:33:21.650140 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fxzr8" event={"ID":"b27389c9-6b13-421e-adfc-0d290d42974a","Type":"ContainerDied","Data":"9016caaf175c7fa024bfd42e641e8cb809d066e6f6e6044d5437049fb03be0b2"} Nov 25 09:33:21 crc kubenswrapper[4734]: I1125 09:33:21.856672 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:21 crc kubenswrapper[4734]: I1125 09:33:21.856724 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:21 crc kubenswrapper[4734]: I1125 09:33:21.895270 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rg94t" Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.656160 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fxzr8" event={"ID":"b27389c9-6b13-421e-adfc-0d290d42974a","Type":"ContainerStarted","Data":"aa23b9499aa79898586e1fa7ae7555330c1b6f7fc2db75980661671ed7889d1f"} Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 
Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.666793 4734 generic.go:334] "Generic (PLEG): container finished" podID="66ce08e8-5d8b-41bf-b896-8b1c1336299d" containerID="4d7aade17a10d5b305be119613a91b774d9f38bd045ea8e764613567d6ecbde4" exitCode=0
Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.666875 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q4n48" event={"ID":"66ce08e8-5d8b-41bf-b896-8b1c1336299d","Type":"ContainerDied","Data":"4d7aade17a10d5b305be119613a91b774d9f38bd045ea8e764613567d6ecbde4"}
Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.670170 4734 generic.go:334] "Generic (PLEG): container finished" podID="473a37db-4499-4761-9a44-137ceb74dec4" containerID="ea880cad09a3aef7d5ded02808126d1aebeb339860f92cceaef38081e6f9e8df" exitCode=0
Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.671661 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4lj7p" event={"ID":"473a37db-4499-4761-9a44-137ceb74dec4","Type":"ContainerDied","Data":"ea880cad09a3aef7d5ded02808126d1aebeb339860f92cceaef38081e6f9e8df"}
Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.683820 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fxzr8" podStartSLOduration=3.108798665 podStartE2EDuration="6.683798549s" podCreationTimestamp="2025-11-25 09:33:16 +0000 UTC" firstStartedPulling="2025-11-25 09:33:18.622638592 +0000 UTC m=+321.433100586" lastFinishedPulling="2025-11-25 09:33:22.197638476 +0000 UTC m=+325.008100470" observedRunningTime="2025-11-25 09:33:22.682526252 +0000 UTC m=+325.492988246" watchObservedRunningTime="2025-11-25 09:33:22.683798549 +0000 UTC m=+325.494260543"
Nov 25 09:33:22 crc kubenswrapper[4734]: I1125 09:33:22.738994 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rg94t"
Nov 25 09:33:24 crc kubenswrapper[4734]: I1125 09:33:24.681596 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q4n48" event={"ID":"66ce08e8-5d8b-41bf-b896-8b1c1336299d","Type":"ContainerStarted","Data":"5dbfde97ad1a17e4b2977bceb03d21f600de5a44faf25a6cb2f3320d2c5543cc"}
Nov 25 09:33:24 crc kubenswrapper[4734]: I1125 09:33:24.698007 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q4n48" podStartSLOduration=3.8669282149999997 podStartE2EDuration="8.697990667s" podCreationTimestamp="2025-11-25 09:33:16 +0000 UTC" firstStartedPulling="2025-11-25 09:33:18.630122732 +0000 UTC m=+321.440584726" lastFinishedPulling="2025-11-25 09:33:23.461185184 +0000 UTC m=+326.271647178" observedRunningTime="2025-11-25 09:33:24.69707012 +0000 UTC m=+327.507532114" watchObservedRunningTime="2025-11-25 09:33:24.697990667 +0000 UTC m=+327.508452661"
Nov 25 09:33:25 crc kubenswrapper[4734]: I1125 09:33:25.691717 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4lj7p" event={"ID":"473a37db-4499-4761-9a44-137ceb74dec4","Type":"ContainerStarted","Data":"eefce1178f16534e495bd1abab00e3504001a35f4c1f5ed532535c0024f090bf"}
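
[annotation] The pod_startup_latency_tracker entries above are internally consistent: podStartSLOduration is the end-to-end startup time minus the image-pull window (lastFinishedPulling - firstStartedPulling). A sketch that reproduces the community-operators-q4n48 numbers; the layout string is Go's reference time, every other value is copied from the log:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        firstPull, _ := time.Parse(layout, "2025-11-25 09:33:18.630122732 +0000 UTC")
        lastPull, _ := time.Parse(layout, "2025-11-25 09:33:23.461185184 +0000 UTC")
        e2e := 8.697990667 // seconds, podStartE2EDuration for community-operators-q4n48
        pulling := lastPull.Sub(firstPull).Seconds()
        // Matches podStartSLOduration=3.866928215 (the log prints the raw float64).
        fmt.Printf("podStartSLOduration = %.9f s\n", e2e-pulling)
    }
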
Nov 25 09:33:25 crc kubenswrapper[4734]: I1125 09:33:25.710651 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4lj7p" podStartSLOduration=2.90964941 podStartE2EDuration="6.710633568s" podCreationTimestamp="2025-11-25 09:33:19 +0000 UTC" firstStartedPulling="2025-11-25 09:33:21.075778448 +0000 UTC m=+323.886240462" lastFinishedPulling="2025-11-25 09:33:24.876762616 +0000 UTC m=+327.687224620" observedRunningTime="2025-11-25 09:33:25.709857715 +0000 UTC m=+328.520319709" watchObservedRunningTime="2025-11-25 09:33:25.710633568 +0000 UTC m=+328.521095562"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.023441 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q4n48"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.023744 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q4n48"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.064250 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q4n48"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.195286 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fxzr8"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.195348 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fxzr8"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.231909 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fxzr8"
Nov 25 09:33:27 crc kubenswrapper[4734]: I1125 09:33:27.749729 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fxzr8"
Nov 25 09:33:29 crc kubenswrapper[4734]: I1125 09:33:29.615490 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4lj7p"
Nov 25 09:33:29 crc kubenswrapper[4734]: I1125 09:33:29.615743 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4lj7p"
Nov 25 09:33:29 crc kubenswrapper[4734]: I1125 09:33:29.679669 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4lj7p"
Nov 25 09:33:30 crc kubenswrapper[4734]: I1125 09:33:30.570931 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" containerName="oauth-openshift" containerID="cri-o://4ca1a8a25d52043eba1d96f1b2e1306a8873e6c6cf2d1733f1bbe22175af50ae" gracePeriod=15
Nov 25 09:33:32 crc kubenswrapper[4734]: I1125 09:33:32.719062 4734 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-xtsd4 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused" start-of-body=
Nov 25 09:33:32 crc kubenswrapper[4734]: I1125 09:33:32.719383 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.11:6443/healthz\": dial tcp 10.217.0.11:6443: connect: connection refused"
Nov 25 09:33:33 crc kubenswrapper[4734]: I1125 09:33:33.747749 4734 generic.go:334] "Generic (PLEG): container finished" podID="fd882e25-5875-43ed-9d01-34b92fe44587" containerID="4ca1a8a25d52043eba1d96f1b2e1306a8873e6c6cf2d1733f1bbe22175af50ae" exitCode=0
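
[annotation] "Killing container with a grace period" (gracePeriod=15 above) follows the usual TERM-then-KILL contract: the runtime signals the container, waits out the grace period, then force-kills. A toy stand-in using a local process; none of this is the kubelet's actual code:

    package main

    import (
        "os/exec"
        "syscall"
        "time"
    )

    // killWithGrace sends SIGTERM, waits up to grace, then SIGKILLs -- the
    // same contract as the kubelet's gracePeriod, modeled on a local process.
    func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        cmd.Process.Signal(syscall.SIGTERM)
        select {
        case <-done: // exited within the grace period
        case <-time.After(grace):
            cmd.Process.Kill() // hard stop once the grace period is exceeded
            <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        killWithGrace(cmd, 2*time.Second)
    }
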
Nov 25 09:33:33 crc kubenswrapper[4734]: I1125 09:33:33.747811 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" event={"ID":"fd882e25-5875-43ed-9d01-34b92fe44587","Type":"ContainerDied","Data":"4ca1a8a25d52043eba1d96f1b2e1306a8873e6c6cf2d1733f1bbe22175af50ae"} Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.514751 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.552831 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-5f79ddd96-zwnq5"] Nov 25 09:33:35 crc kubenswrapper[4734]: E1125 09:33:35.553973 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" containerName="oauth-openshift" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.554026 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" containerName="oauth-openshift" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.554244 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" containerName="oauth-openshift" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.554978 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.560868 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5f79ddd96-zwnq5"] Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621281 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-trusted-ca-bundle\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621342 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-cliconfig\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621384 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-audit-policies\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621419 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-service-ca\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621442 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khvh2\" (UniqueName: \"kubernetes.io/projected/fd882e25-5875-43ed-9d01-34b92fe44587-kube-api-access-khvh2\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: 
\"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621466 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-session\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621805 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-login\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621854 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-idp-0-file-data\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621877 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd882e25-5875-43ed-9d01-34b92fe44587-audit-dir\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621911 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-serving-cert\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621938 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-error\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.621964 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-router-certs\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622001 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-ocp-branding-template\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622019 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-provider-selection\") pod \"fd882e25-5875-43ed-9d01-34b92fe44587\" (UID: \"fd882e25-5875-43ed-9d01-34b92fe44587\") " Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 
09:33:35.622053 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622064 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622136 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622207 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622242 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-router-certs\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622293 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-error\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622299 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fd882e25-5875-43ed-9d01-34b92fe44587-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622353 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-audit-dir\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622391 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-service-ca\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622408 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-session\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622432 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622453 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622499 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622546 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-login\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622609 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-audit-policies\") pod 
\"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622636 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622661 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp8dg\" (UniqueName: \"kubernetes.io/projected/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-kube-api-access-zp8dg\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622680 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622720 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622756 4734 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622766 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622778 4734 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fd882e25-5875-43ed-9d01-34b92fe44587-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622788 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.622797 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.627345 4734 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.629103 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.629182 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd882e25-5875-43ed-9d01-34b92fe44587-kube-api-access-khvh2" (OuterVolumeSpecName: "kube-api-access-khvh2") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "kube-api-access-khvh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.629518 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.723937 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724002 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-router-certs\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724072 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-error\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724127 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-audit-dir\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc 
kubenswrapper[4734]: I1125 09:33:35.724158 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-session\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724182 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-service-ca\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724204 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724229 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724251 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724283 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-login\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724320 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-audit-policies\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724345 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc 
kubenswrapper[4734]: I1125 09:33:35.724376 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724372 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-audit-dir\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724398 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp8dg\" (UniqueName: \"kubernetes.io/projected/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-kube-api-access-zp8dg\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724780 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724807 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khvh2\" (UniqueName: \"kubernetes.io/projected/fd882e25-5875-43ed-9d01-34b92fe44587-kube-api-access-khvh2\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724824 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724837 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.724973 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-service-ca\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.725074 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.725309 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-audit-policies\") pod 
\"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.725543 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.727862 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.728422 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-session\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.728467 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.728514 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-router-certs\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.728677 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.729485 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-error\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.729850 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-user-template-login\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: 
\"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.730492 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.732623 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.732873 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.735799 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.735912 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.736208 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "fd882e25-5875-43ed-9d01-34b92fe44587" (UID: "fd882e25-5875-43ed-9d01-34b92fe44587"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.740798 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp8dg\" (UniqueName: \"kubernetes.io/projected/b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae-kube-api-access-zp8dg\") pod \"oauth-openshift-5f79ddd96-zwnq5\" (UID: \"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae\") " pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.760813 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" event={"ID":"fd882e25-5875-43ed-9d01-34b92fe44587","Type":"ContainerDied","Data":"4beb5f1cfbf01a7281fa5dc7cf0b08de6a504246fe718e7239776729e42326f3"} Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.760876 4734 scope.go:117] "RemoveContainer" containerID="4ca1a8a25d52043eba1d96f1b2e1306a8873e6c6cf2d1733f1bbe22175af50ae" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.761006 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-xtsd4" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.789968 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-xtsd4"] Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.794988 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-xtsd4"] Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.825306 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.825343 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.825359 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.825375 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.825388 4734 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fd882e25-5875-43ed-9d01-34b92fe44587-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:33:35 crc kubenswrapper[4734]: I1125 09:33:35.904582 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:36 crc kubenswrapper[4734]: I1125 09:33:36.236526 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5f79ddd96-zwnq5"] Nov 25 09:33:36 crc kubenswrapper[4734]: W1125 09:33:36.247913 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb9458bed_8729_44cc_a0c1_7c8e4b2ab6ae.slice/crio-ea8b7961aee28b232b1d4d7bd7ea6c3a9121c25e981e2ebbe36eeb2734300b18 WatchSource:0}: Error finding container ea8b7961aee28b232b1d4d7bd7ea6c3a9121c25e981e2ebbe36eeb2734300b18: Status 404 returned error can't find the container with id ea8b7961aee28b232b1d4d7bd7ea6c3a9121c25e981e2ebbe36eeb2734300b18 Nov 25 09:33:36 crc kubenswrapper[4734]: I1125 09:33:36.254147 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd882e25-5875-43ed-9d01-34b92fe44587" path="/var/lib/kubelet/pods/fd882e25-5875-43ed-9d01-34b92fe44587/volumes" Nov 25 09:33:36 crc kubenswrapper[4734]: I1125 09:33:36.770032 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" event={"ID":"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae","Type":"ContainerStarted","Data":"2a3a0f32953a35af305661017f52e7f3c17aa95014802626b8bbd6ee1d162e75"} Nov 25 09:33:36 crc kubenswrapper[4734]: I1125 09:33:36.770365 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" event={"ID":"b9458bed-8729-44cc-a0c1-7c8e4b2ab6ae","Type":"ContainerStarted","Data":"ea8b7961aee28b232b1d4d7bd7ea6c3a9121c25e981e2ebbe36eeb2734300b18"} Nov 25 09:33:36 crc kubenswrapper[4734]: I1125 09:33:36.771317 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:36 crc kubenswrapper[4734]: I1125 09:33:36.794550 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" podStartSLOduration=31.794530622 podStartE2EDuration="31.794530622s" podCreationTimestamp="2025-11-25 09:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:33:36.791503213 +0000 UTC m=+339.601965217" watchObservedRunningTime="2025-11-25 09:33:36.794530622 +0000 UTC m=+339.604992616" Nov 25 09:33:37 crc kubenswrapper[4734]: I1125 09:33:37.068597 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q4n48" Nov 25 09:33:37 crc kubenswrapper[4734]: I1125 09:33:37.173126 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5f79ddd96-zwnq5" Nov 25 09:33:39 crc kubenswrapper[4734]: I1125 09:33:39.658620 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4lj7p" Nov 25 09:34:50 crc kubenswrapper[4734]: I1125 09:34:50.695914 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:34:50 crc kubenswrapper[4734]: I1125 09:34:50.696479 4734 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:35:20 crc kubenswrapper[4734]: I1125 09:35:20.695617 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:35:20 crc kubenswrapper[4734]: I1125 09:35:20.696246 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:35:50 crc kubenswrapper[4734]: I1125 09:35:50.695599 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:35:50 crc kubenswrapper[4734]: I1125 09:35:50.696302 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:35:50 crc kubenswrapper[4734]: I1125 09:35:50.696359 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:35:50 crc kubenswrapper[4734]: I1125 09:35:50.696908 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5eec5927f80d5fed350fbc6c560ee959f6b791701d55a7d8a01b4fa5fe3c1b43"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:35:50 crc kubenswrapper[4734]: I1125 09:35:50.696970 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://5eec5927f80d5fed350fbc6c560ee959f6b791701d55a7d8a01b4fa5fe3c1b43" gracePeriod=600 Nov 25 09:35:51 crc kubenswrapper[4734]: I1125 09:35:51.540789 4734 generic.go:334] "Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="5eec5927f80d5fed350fbc6c560ee959f6b791701d55a7d8a01b4fa5fe3c1b43" exitCode=0 Nov 25 09:35:51 crc kubenswrapper[4734]: I1125 09:35:51.540829 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"5eec5927f80d5fed350fbc6c560ee959f6b791701d55a7d8a01b4fa5fe3c1b43"} Nov 25 09:35:51 crc kubenswrapper[4734]: I1125 09:35:51.541421 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"e340e6ae7359f944d27daa5c3a4cb8f7f0ef52834b6c932f518c18e151b43653"} Nov 25 09:35:51 crc kubenswrapper[4734]: I1125 09:35:51.541445 4734 scope.go:117] "RemoveContainer" containerID="c1439ca12436bc4ff0c72991367590123d2f1966c96b17e3e92f079bb8595c45" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.242917 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-9bt9w"] Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.244315 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.261506 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-9bt9w"] Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.355583 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-bound-sa-token\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.355747 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-registry-tls\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.355793 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9506c702-e9d0-4097-ba0a-35384d088233-registry-certificates\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.355832 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4pxs\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-kube-api-access-q4pxs\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.355903 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9506c702-e9d0-4097-ba0a-35384d088233-trusted-ca\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.356008 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.356064 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9506c702-e9d0-4097-ba0a-35384d088233-installation-pull-secrets\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.356220 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9506c702-e9d0-4097-ba0a-35384d088233-ca-trust-extracted\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.382793 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457755 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9506c702-e9d0-4097-ba0a-35384d088233-ca-trust-extracted\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457813 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-bound-sa-token\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457833 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-registry-tls\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457851 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9506c702-e9d0-4097-ba0a-35384d088233-registry-certificates\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457871 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4pxs\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-kube-api-access-q4pxs\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457902 4734 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9506c702-e9d0-4097-ba0a-35384d088233-trusted-ca\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.457923 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9506c702-e9d0-4097-ba0a-35384d088233-installation-pull-secrets\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.458349 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9506c702-e9d0-4097-ba0a-35384d088233-ca-trust-extracted\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.459273 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9506c702-e9d0-4097-ba0a-35384d088233-trusted-ca\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.459362 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9506c702-e9d0-4097-ba0a-35384d088233-registry-certificates\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.464587 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9506c702-e9d0-4097-ba0a-35384d088233-installation-pull-secrets\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.464899 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-registry-tls\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.480124 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-bound-sa-token\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") " pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.488948 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4pxs\" (UniqueName: \"kubernetes.io/projected/9506c702-e9d0-4097-ba0a-35384d088233-kube-api-access-q4pxs\") pod \"image-registry-66df7c8f76-9bt9w\" (UID: \"9506c702-e9d0-4097-ba0a-35384d088233\") 
" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.558805 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.802155 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-9bt9w"] Nov 25 09:36:38 crc kubenswrapper[4734]: I1125 09:36:38.830409 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" event={"ID":"9506c702-e9d0-4097-ba0a-35384d088233","Type":"ContainerStarted","Data":"9eb688812be353c893c5f7de12358a400667874507cf25a0c6a213ea5fa2cbb6"} Nov 25 09:36:39 crc kubenswrapper[4734]: I1125 09:36:39.838102 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" event={"ID":"9506c702-e9d0-4097-ba0a-35384d088233","Type":"ContainerStarted","Data":"067482f611631e7cb5b07d1e0772837963a9ca009e3b22e7b740200f63cc5383"} Nov 25 09:36:39 crc kubenswrapper[4734]: I1125 09:36:39.839194 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:39 crc kubenswrapper[4734]: I1125 09:36:39.866356 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" podStartSLOduration=1.8663289189999999 podStartE2EDuration="1.866328919s" podCreationTimestamp="2025-11-25 09:36:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:36:39.86228928 +0000 UTC m=+522.672751284" watchObservedRunningTime="2025-11-25 09:36:39.866328919 +0000 UTC m=+522.676790943" Nov 25 09:36:58 crc kubenswrapper[4734]: I1125 09:36:58.569774 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-9bt9w" Nov 25 09:36:58 crc kubenswrapper[4734]: I1125 09:36:58.651375 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pmwh4"] Nov 25 09:37:23 crc kubenswrapper[4734]: I1125 09:37:23.689902 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" podUID="8579ba87-e7d7-41e0-8ca6-3beaee3dd354" containerName="registry" containerID="cri-o://944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702" gracePeriod=30 Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.017942 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.076716 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-trusted-ca\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.076763 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-bound-sa-token\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.076791 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5pjsj\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-kube-api-access-5pjsj\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.076879 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-installation-pull-secrets\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.076902 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-certificates\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.077176 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.077367 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-ca-trust-extracted\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.077439 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-tls\") pod \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\" (UID: \"8579ba87-e7d7-41e0-8ca6-3beaee3dd354\") " Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.078163 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.078339 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.082444 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-kube-api-access-5pjsj" (OuterVolumeSpecName: "kube-api-access-5pjsj") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "kube-api-access-5pjsj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.082670 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.084920 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.089514 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.092134 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.093222 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8579ba87-e7d7-41e0-8ca6-3beaee3dd354" (UID: "8579ba87-e7d7-41e0-8ca6-3beaee3dd354"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.189886 4734 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.189945 4734 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.189967 4734 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.189986 4734 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.190023 4734 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.190041 4734 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.190058 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5pjsj\" (UniqueName: \"kubernetes.io/projected/8579ba87-e7d7-41e0-8ca6-3beaee3dd354-kube-api-access-5pjsj\") on node \"crc\" DevicePath \"\"" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.359048 4734 generic.go:334] "Generic (PLEG): container finished" podID="8579ba87-e7d7-41e0-8ca6-3beaee3dd354" containerID="944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702" exitCode=0 Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.359234 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" event={"ID":"8579ba87-e7d7-41e0-8ca6-3beaee3dd354","Type":"ContainerDied","Data":"944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702"} Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.359527 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" event={"ID":"8579ba87-e7d7-41e0-8ca6-3beaee3dd354","Type":"ContainerDied","Data":"7c97b9a69a0d0001ec872e8f995f5f6571cac619f74b90c0cce5e0aa6b852de6"} Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.359551 4734 scope.go:117] "RemoveContainer" containerID="944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.359307 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pmwh4" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.380769 4734 scope.go:117] "RemoveContainer" containerID="944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.380918 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pmwh4"] Nov 25 09:37:24 crc kubenswrapper[4734]: E1125 09:37:24.381825 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702\": container with ID starting with 944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702 not found: ID does not exist" containerID="944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.381875 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702"} err="failed to get container status \"944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702\": rpc error: code = NotFound desc = could not find container \"944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702\": container with ID starting with 944dbafdd53ac46b2fc76530064743d6552b39338f6186676cbfa677ae306702 not found: ID does not exist" Nov 25 09:37:24 crc kubenswrapper[4734]: I1125 09:37:24.386580 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pmwh4"] Nov 25 09:37:26 crc kubenswrapper[4734]: I1125 09:37:26.254951 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8579ba87-e7d7-41e0-8ca6-3beaee3dd354" path="/var/lib/kubelet/pods/8579ba87-e7d7-41e0-8ca6-3beaee3dd354/volumes" Nov 25 09:37:50 crc kubenswrapper[4734]: I1125 09:37:50.696005 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:37:50 crc kubenswrapper[4734]: I1125 09:37:50.696455 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:38:05 crc kubenswrapper[4734]: I1125 09:38:05.699826 4734 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rvq8p container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.54:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 09:38:05 crc kubenswrapper[4734]: I1125 09:38:05.700645 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" podUID="c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.54:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:38:05 crc kubenswrapper[4734]: I1125 09:38:05.699860 4734 
patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rvq8p container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.54:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 09:38:05 crc kubenswrapper[4734]: I1125 09:38:05.701071 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rvq8p" podUID="c4b3d5c9-6b58-4f1a-bfb2-c39d7a00c746" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.54:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:38:20 crc kubenswrapper[4734]: I1125 09:38:20.695765 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:38:20 crc kubenswrapper[4734]: I1125 09:38:20.696246 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:38:50 crc kubenswrapper[4734]: I1125 09:38:50.695459 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:38:50 crc kubenswrapper[4734]: I1125 09:38:50.696897 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:38:50 crc kubenswrapper[4734]: I1125 09:38:50.696984 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:38:50 crc kubenswrapper[4734]: I1125 09:38:50.697656 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e340e6ae7359f944d27daa5c3a4cb8f7f0ef52834b6c932f518c18e151b43653"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:38:50 crc kubenswrapper[4734]: I1125 09:38:50.697733 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://e340e6ae7359f944d27daa5c3a4cb8f7f0ef52834b6c932f518c18e151b43653" gracePeriod=600 Nov 25 09:38:51 crc kubenswrapper[4734]: I1125 09:38:51.000554 4734 generic.go:334] "Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="e340e6ae7359f944d27daa5c3a4cb8f7f0ef52834b6c932f518c18e151b43653" exitCode=0 Nov 25 09:38:51 crc kubenswrapper[4734]: I1125 
09:38:51.000622 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"e340e6ae7359f944d27daa5c3a4cb8f7f0ef52834b6c932f518c18e151b43653"} Nov 25 09:38:51 crc kubenswrapper[4734]: I1125 09:38:51.001001 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"0a9c180e121b5cb8f4c6eca924f66d6e13dbe5f0459c72e12dd199f92062a51f"} Nov 25 09:38:51 crc kubenswrapper[4734]: I1125 09:38:51.001022 4734 scope.go:117] "RemoveContainer" containerID="5eec5927f80d5fed350fbc6c560ee959f6b791701d55a7d8a01b4fa5fe3c1b43" Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.591751 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b78xz"] Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.592602 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerName="controller-manager" containerID="cri-o://be5f669b3f9f6c2f98d367635c875de0c8a33299e2a7172710f729181d76661a" gracePeriod=30 Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.705660 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"] Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.705949 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" podUID="b15a18b7-d1cb-4054-9e79-89e2681747f2" containerName="route-controller-manager" containerID="cri-o://1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210" gracePeriod=30 Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.724547 4734 generic.go:334] "Generic (PLEG): container finished" podID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerID="be5f669b3f9f6c2f98d367635c875de0c8a33299e2a7172710f729181d76661a" exitCode=0 Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.724590 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" event={"ID":"b4504e14-e8ec-4fea-acff-0848c20861b0","Type":"ContainerDied","Data":"be5f669b3f9f6c2f98d367635c875de0c8a33299e2a7172710f729181d76661a"} Nov 25 09:40:43 crc kubenswrapper[4734]: I1125 09:40:43.954470 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.037427 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.127221 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4504e14-e8ec-4fea-acff-0848c20861b0-serving-cert\") pod \"b4504e14-e8ec-4fea-acff-0848c20861b0\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.127284 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-config\") pod \"b4504e14-e8ec-4fea-acff-0848c20861b0\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.127376 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-client-ca\") pod \"b4504e14-e8ec-4fea-acff-0848c20861b0\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.127433 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz4dg\" (UniqueName: \"kubernetes.io/projected/b4504e14-e8ec-4fea-acff-0848c20861b0-kube-api-access-rz4dg\") pod \"b4504e14-e8ec-4fea-acff-0848c20861b0\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.127462 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-proxy-ca-bundles\") pod \"b4504e14-e8ec-4fea-acff-0848c20861b0\" (UID: \"b4504e14-e8ec-4fea-acff-0848c20861b0\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.128671 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-client-ca" (OuterVolumeSpecName: "client-ca") pod "b4504e14-e8ec-4fea-acff-0848c20861b0" (UID: "b4504e14-e8ec-4fea-acff-0848c20861b0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.128686 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "b4504e14-e8ec-4fea-acff-0848c20861b0" (UID: "b4504e14-e8ec-4fea-acff-0848c20861b0"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.128859 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-config" (OuterVolumeSpecName: "config") pod "b4504e14-e8ec-4fea-acff-0848c20861b0" (UID: "b4504e14-e8ec-4fea-acff-0848c20861b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.133272 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4504e14-e8ec-4fea-acff-0848c20861b0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b4504e14-e8ec-4fea-acff-0848c20861b0" (UID: "b4504e14-e8ec-4fea-acff-0848c20861b0"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.133293 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4504e14-e8ec-4fea-acff-0848c20861b0-kube-api-access-rz4dg" (OuterVolumeSpecName: "kube-api-access-rz4dg") pod "b4504e14-e8ec-4fea-acff-0848c20861b0" (UID: "b4504e14-e8ec-4fea-acff-0848c20861b0"). InnerVolumeSpecName "kube-api-access-rz4dg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.228873 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b15a18b7-d1cb-4054-9e79-89e2681747f2-serving-cert\") pod \"b15a18b7-d1cb-4054-9e79-89e2681747f2\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229013 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-config\") pod \"b15a18b7-d1cb-4054-9e79-89e2681747f2\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229057 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvd6t\" (UniqueName: \"kubernetes.io/projected/b15a18b7-d1cb-4054-9e79-89e2681747f2-kube-api-access-fvd6t\") pod \"b15a18b7-d1cb-4054-9e79-89e2681747f2\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229078 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-client-ca\") pod \"b15a18b7-d1cb-4054-9e79-89e2681747f2\" (UID: \"b15a18b7-d1cb-4054-9e79-89e2681747f2\") " Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229289 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz4dg\" (UniqueName: \"kubernetes.io/projected/b4504e14-e8ec-4fea-acff-0848c20861b0-kube-api-access-rz4dg\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229301 4734 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229312 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4504e14-e8ec-4fea-acff-0848c20861b0-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229321 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229331 4734 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4504e14-e8ec-4fea-acff-0848c20861b0-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229941 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-client-ca" (OuterVolumeSpecName: "client-ca") pod "b15a18b7-d1cb-4054-9e79-89e2681747f2" (UID: 
"b15a18b7-d1cb-4054-9e79-89e2681747f2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.229961 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-config" (OuterVolumeSpecName: "config") pod "b15a18b7-d1cb-4054-9e79-89e2681747f2" (UID: "b15a18b7-d1cb-4054-9e79-89e2681747f2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.233039 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b15a18b7-d1cb-4054-9e79-89e2681747f2-kube-api-access-fvd6t" (OuterVolumeSpecName: "kube-api-access-fvd6t") pod "b15a18b7-d1cb-4054-9e79-89e2681747f2" (UID: "b15a18b7-d1cb-4054-9e79-89e2681747f2"). InnerVolumeSpecName "kube-api-access-fvd6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.233921 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b15a18b7-d1cb-4054-9e79-89e2681747f2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b15a18b7-d1cb-4054-9e79-89e2681747f2" (UID: "b15a18b7-d1cb-4054-9e79-89e2681747f2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.329910 4734 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.329939 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvd6t\" (UniqueName: \"kubernetes.io/projected/b15a18b7-d1cb-4054-9e79-89e2681747f2-kube-api-access-fvd6t\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.329950 4734 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b15a18b7-d1cb-4054-9e79-89e2681747f2-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.329959 4734 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b15a18b7-d1cb-4054-9e79-89e2681747f2-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.730461 4734 generic.go:334] "Generic (PLEG): container finished" podID="b15a18b7-d1cb-4054-9e79-89e2681747f2" containerID="1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210" exitCode=0 Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.730501 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" event={"ID":"b15a18b7-d1cb-4054-9e79-89e2681747f2","Type":"ContainerDied","Data":"1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210"} Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.730542 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.730724 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm" event={"ID":"b15a18b7-d1cb-4054-9e79-89e2681747f2","Type":"ContainerDied","Data":"dfce1cc5879b7ccc79aba96bdb9995c70d9559ddb1bb0604479aa3bd6e17f287"} Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.730749 4734 scope.go:117] "RemoveContainer" containerID="1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.732134 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" event={"ID":"b4504e14-e8ec-4fea-acff-0848c20861b0","Type":"ContainerDied","Data":"10c76dc2891e398a678fe64c7fb3f53cc1daff024a1e40a62a7b68934687cb54"} Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.732179 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b78xz" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.749768 4734 scope.go:117] "RemoveContainer" containerID="1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210" Nov 25 09:40:44 crc kubenswrapper[4734]: E1125 09:40:44.750419 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210\": container with ID starting with 1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210 not found: ID does not exist" containerID="1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.750466 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210"} err="failed to get container status \"1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210\": rpc error: code = NotFound desc = could not find container \"1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210\": container with ID starting with 1c6141bc08d3ad125f6b284067ceb3eec53ad8ba5b7162ef1ed5702d253e0210 not found: ID does not exist" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.750495 4734 scope.go:117] "RemoveContainer" containerID="be5f669b3f9f6c2f98d367635c875de0c8a33299e2a7172710f729181d76661a" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.750987 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.755875 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-46nzm"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.763797 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b78xz"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.770118 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b78xz"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.810238 4734 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-controller-manager/controller-manager-557d587546-7pqt8"] Nov 25 09:40:44 crc kubenswrapper[4734]: E1125 09:40:44.810487 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerName="controller-manager" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.810499 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerName="controller-manager" Nov 25 09:40:44 crc kubenswrapper[4734]: E1125 09:40:44.810517 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8579ba87-e7d7-41e0-8ca6-3beaee3dd354" containerName="registry" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.810523 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8579ba87-e7d7-41e0-8ca6-3beaee3dd354" containerName="registry" Nov 25 09:40:44 crc kubenswrapper[4734]: E1125 09:40:44.815334 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15a18b7-d1cb-4054-9e79-89e2681747f2" containerName="route-controller-manager" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.815385 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15a18b7-d1cb-4054-9e79-89e2681747f2" containerName="route-controller-manager" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.815597 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="b15a18b7-d1cb-4054-9e79-89e2681747f2" containerName="route-controller-manager" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.815616 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="8579ba87-e7d7-41e0-8ca6-3beaee3dd354" containerName="registry" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.815632 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" containerName="controller-manager" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.815955 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.816190 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.816504 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.821809 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.821999 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.822401 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.822578 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.822725 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.829220 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-557d587546-7pqt8"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.829407 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m"] Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.830955 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.831219 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.831508 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.831534 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.831627 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.831673 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.832297 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.857014 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953028 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jw6t\" (UniqueName: \"kubernetes.io/projected/7f0815df-11e8-4fd0-90fa-c878a6ca5646-kube-api-access-9jw6t\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953113 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af860f8f-8503-414f-bec2-7c265ebe7003-serving-cert\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953448 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkjf4\" (UniqueName: \"kubernetes.io/projected/af860f8f-8503-414f-bec2-7c265ebe7003-kube-api-access-fkjf4\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953509 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-client-ca\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953538 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f0815df-11e8-4fd0-90fa-c878a6ca5646-config\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953559 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f0815df-11e8-4fd0-90fa-c878a6ca5646-client-ca\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953577 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-proxy-ca-bundles\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953728 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f0815df-11e8-4fd0-90fa-c878a6ca5646-serving-cert\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:44 crc kubenswrapper[4734]: I1125 09:40:44.953826 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-config\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055042 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-config\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055451 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jw6t\" (UniqueName: \"kubernetes.io/projected/7f0815df-11e8-4fd0-90fa-c878a6ca5646-kube-api-access-9jw6t\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055572 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af860f8f-8503-414f-bec2-7c265ebe7003-serving-cert\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055683 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkjf4\" (UniqueName: \"kubernetes.io/projected/af860f8f-8503-414f-bec2-7c265ebe7003-kube-api-access-fkjf4\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055789 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-client-ca\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055892 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f0815df-11e8-4fd0-90fa-c878a6ca5646-config\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.055994 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f0815df-11e8-4fd0-90fa-c878a6ca5646-client-ca\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.056109 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-proxy-ca-bundles\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.056219 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f0815df-11e8-4fd0-90fa-c878a6ca5646-serving-cert\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: 
\"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.056588 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-config\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.057233 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f0815df-11e8-4fd0-90fa-c878a6ca5646-client-ca\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.057378 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-client-ca\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.057691 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f0815df-11e8-4fd0-90fa-c878a6ca5646-config\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.057702 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/af860f8f-8503-414f-bec2-7c265ebe7003-proxy-ca-bundles\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.061322 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f0815df-11e8-4fd0-90fa-c878a6ca5646-serving-cert\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.061832 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af860f8f-8503-414f-bec2-7c265ebe7003-serving-cert\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.074156 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jw6t\" (UniqueName: \"kubernetes.io/projected/7f0815df-11e8-4fd0-90fa-c878a6ca5646-kube-api-access-9jw6t\") pod \"route-controller-manager-7bcd7d9658-q464m\" (UID: \"7f0815df-11e8-4fd0-90fa-c878a6ca5646\") " pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.075855 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkjf4\" (UniqueName: \"kubernetes.io/projected/af860f8f-8503-414f-bec2-7c265ebe7003-kube-api-access-fkjf4\") pod \"controller-manager-557d587546-7pqt8\" (UID: \"af860f8f-8503-414f-bec2-7c265ebe7003\") " pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.157008 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.165798 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.361452 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-557d587546-7pqt8"] Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.414633 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m"] Nov 25 09:40:45 crc kubenswrapper[4734]: W1125 09:40:45.420392 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f0815df_11e8_4fd0_90fa_c878a6ca5646.slice/crio-8ed4e66feacce9c02583f0a3581fdecaa4b2a6044c5a4a00b6bf19185a64c932 WatchSource:0}: Error finding container 8ed4e66feacce9c02583f0a3581fdecaa4b2a6044c5a4a00b6bf19185a64c932: Status 404 returned error can't find the container with id 8ed4e66feacce9c02583f0a3581fdecaa4b2a6044c5a4a00b6bf19185a64c932 Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.740690 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" event={"ID":"7f0815df-11e8-4fd0-90fa-c878a6ca5646","Type":"ContainerStarted","Data":"b6ae943285a4bb4e0dd77af467a98e959691ad4b06f5d146479f8e5f901220b1"} Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.741200 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" event={"ID":"7f0815df-11e8-4fd0-90fa-c878a6ca5646","Type":"ContainerStarted","Data":"8ed4e66feacce9c02583f0a3581fdecaa4b2a6044c5a4a00b6bf19185a64c932"} Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.743175 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" event={"ID":"af860f8f-8503-414f-bec2-7c265ebe7003","Type":"ContainerStarted","Data":"ca1b72e87c319d7e80d0f5748276ae3a0f6e4be53281dcfb0d98fb48c9db45f9"} Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.743262 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" event={"ID":"af860f8f-8503-414f-bec2-7c265ebe7003","Type":"ContainerStarted","Data":"41b36a731d109d1117a5b50e00462870ad3e8ffddab7360e9725814b6b3760fd"} Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.743415 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc kubenswrapper[4734]: I1125 09:40:45.754164 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" Nov 25 09:40:45 crc 
kubenswrapper[4734]: I1125 09:40:45.765047 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" podStartSLOduration=2.7650246689999998 podStartE2EDuration="2.765024669s" podCreationTimestamp="2025-11-25 09:40:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:40:45.764413171 +0000 UTC m=+768.574875165" watchObservedRunningTime="2025-11-25 09:40:45.765024669 +0000 UTC m=+768.575486663" Nov 25 09:40:46 crc kubenswrapper[4734]: I1125 09:40:46.258889 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b15a18b7-d1cb-4054-9e79-89e2681747f2" path="/var/lib/kubelet/pods/b15a18b7-d1cb-4054-9e79-89e2681747f2/volumes" Nov 25 09:40:46 crc kubenswrapper[4734]: I1125 09:40:46.259649 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4504e14-e8ec-4fea-acff-0848c20861b0" path="/var/lib/kubelet/pods/b4504e14-e8ec-4fea-acff-0848c20861b0/volumes" Nov 25 09:40:46 crc kubenswrapper[4734]: I1125 09:40:46.748694 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:46 crc kubenswrapper[4734]: I1125 09:40:46.754249 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7bcd7d9658-q464m" Nov 25 09:40:46 crc kubenswrapper[4734]: I1125 09:40:46.775725 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-557d587546-7pqt8" podStartSLOduration=3.775709248 podStartE2EDuration="3.775709248s" podCreationTimestamp="2025-11-25 09:40:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:40:45.794337096 +0000 UTC m=+768.604799100" watchObservedRunningTime="2025-11-25 09:40:46.775709248 +0000 UTC m=+769.586171252" Nov 25 09:40:50 crc kubenswrapper[4734]: I1125 09:40:50.696112 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:40:50 crc kubenswrapper[4734]: I1125 09:40:50.696674 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:40:50 crc kubenswrapper[4734]: I1125 09:40:50.724727 4734 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.466805 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2vvjj"] Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467542 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-controller" 
containerID="cri-o://e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467629 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="sbdb" containerID="cri-o://37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467690 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467671 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-acl-logging" containerID="cri-o://834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467656 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-node" containerID="cri-o://c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467818 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="northd" containerID="cri-o://9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.467753 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="nbdb" containerID="cri-o://88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.505636 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" containerID="cri-o://43fd484a9960544cf74472150ca51b7c7efdc7a3c46fe5c2f855fde6e0cc5b04" gracePeriod=30 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.810874 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/2.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.811563 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/1.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.811601 4734 generic.go:334] "Generic (PLEG): container finished" podID="80259512-c4ac-4362-b21e-386796e31645" containerID="c0ee7992858b9fd7a10962667478f845a8b192cef15d0cf18988e653d1d097ac" exitCode=2 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.811660 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" 
event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerDied","Data":"c0ee7992858b9fd7a10962667478f845a8b192cef15d0cf18988e653d1d097ac"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.811735 4734 scope.go:117] "RemoveContainer" containerID="babd3dd41f3b6d6d397bb4f2a91b233fa2b3f06f0cca719f3bfc7b4926865b08" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.812169 4734 scope.go:117] "RemoveContainer" containerID="c0ee7992858b9fd7a10962667478f845a8b192cef15d0cf18988e653d1d097ac" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.819888 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/3.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.822651 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovn-acl-logging/0.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.823282 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovn-controller/0.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824156 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovnkube-controller/3.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824495 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="43fd484a9960544cf74472150ca51b7c7efdc7a3c46fe5c2f855fde6e0cc5b04" exitCode=0 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824529 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e" exitCode=0 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824541 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583" exitCode=0 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824556 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8" exitCode=0 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824569 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40" exitCode=0 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824581 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778" exitCode=0 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824591 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864" exitCode=143 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824600 4734 generic.go:334] "Generic (PLEG): container finished" podID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerID="e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5" exitCode=143 Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824620 
4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"43fd484a9960544cf74472150ca51b7c7efdc7a3c46fe5c2f855fde6e0cc5b04"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824653 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824665 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824676 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824685 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824693 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824702 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.824711 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5"} Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.828965 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovn-acl-logging/0.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.830743 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovn-controller/0.log" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.832052 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.835141 4734 scope.go:117] "RemoveContainer" containerID="4ec12579345d6e14f97bb91548b15120c148e9a9738f0c4fbeb50944d53eb71a" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897257 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z2jkk"] Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897639 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897659 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897678 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897686 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897706 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-acl-logging" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897719 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-acl-logging" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897730 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897738 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897748 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="northd" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897756 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="northd" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897776 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897784 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897799 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="sbdb" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897809 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="sbdb" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897822 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kubecfg-setup" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897831 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" 
containerName="kubecfg-setup" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897847 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-node" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897856 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-node" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897873 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897883 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897894 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897902 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.897922 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="nbdb" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.897930 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="nbdb" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898154 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="sbdb" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898173 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898186 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898202 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-node" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898219 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="nbdb" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898238 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898247 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="northd" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898259 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898277 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovn-acl-logging" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898292 4734 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: E1125 09:40:58.898500 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898522 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.898733 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.899277 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" containerName="ovnkube-controller" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.901971 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.919925 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-netd\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920212 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-slash\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920237 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-systemd-units\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920258 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-ovn\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920196 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920281 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-kubelet\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920261 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-slash" (OuterVolumeSpecName: "host-slash") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920299 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-ovn-kubernetes\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920329 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920358 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920370 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fvtk\" (UniqueName: \"kubernetes.io/projected/cbdcaaef-9e1d-421f-b1fa-05223f0067af-kube-api-access-9fvtk\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920411 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-node-log\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920463 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-config\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920487 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-var-lib-cni-networks-ovn-kubernetes\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920508 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-log-socket\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920547 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-systemd\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920570 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-etc-openvswitch\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920597 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-bin\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920621 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-env-overrides\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920647 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-openvswitch\") pod 
\"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920669 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-script-lib\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920717 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-var-lib-openvswitch\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920742 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovn-node-metrics-cert\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920765 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-netns\") pod \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\" (UID: \"cbdcaaef-9e1d-421f-b1fa-05223f0067af\") " Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920376 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921204 4734 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921279 4734 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921355 4734 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921424 4734 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921245 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921244 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921107 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921660 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921633 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.921774 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-node-log" (OuterVolumeSpecName: "node-log") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.922018 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.922064 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.922145 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.920390 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.922174 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-log-socket" (OuterVolumeSpecName: "log-socket") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.922490 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.925893 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbdcaaef-9e1d-421f-b1fa-05223f0067af-kube-api-access-9fvtk" (OuterVolumeSpecName: "kube-api-access-9fvtk") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "kube-api-access-9fvtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.926151 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:40:58 crc kubenswrapper[4734]: I1125 09:40:58.941711 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "cbdcaaef-9e1d-421f-b1fa-05223f0067af" (UID: "cbdcaaef-9e1d-421f-b1fa-05223f0067af"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023102 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovn-node-metrics-cert\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023341 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-kubelet\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023421 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-cni-bin\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023491 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-log-socket\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023566 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-systemd\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023633 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-slash\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023782 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-cni-netd\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023858 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovnkube-script-lib\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023930 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovnkube-config\") pod 
\"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.023991 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024068 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-ovn\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024164 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-node-log\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024230 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvkpw\" (UniqueName: \"kubernetes.io/projected/79913912-1ec6-4538-a4ac-03a7e2ac199f-kube-api-access-wvkpw\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024295 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-run-netns\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024357 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-run-ovn-kubernetes\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024427 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-env-overrides\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024502 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-var-lib-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024570 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024633 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-etc-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024701 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-systemd-units\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024867 4734 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.024952 4734 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025007 4734 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025060 4734 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025136 4734 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025218 4734 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025273 4734 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025325 4734 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025416 4734 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" 
Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025471 4734 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025520 4734 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbdcaaef-9e1d-421f-b1fa-05223f0067af-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025570 4734 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025632 4734 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025682 4734 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025731 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fvtk\" (UniqueName: \"kubernetes.io/projected/cbdcaaef-9e1d-421f-b1fa-05223f0067af-kube-api-access-9fvtk\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.025780 4734 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/cbdcaaef-9e1d-421f-b1fa-05223f0067af-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.126844 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovn-node-metrics-cert\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.126914 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-kubelet\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.126936 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-cni-bin\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.126957 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-log-socket\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.126975 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-systemd\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.126991 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-slash\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127010 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-cni-netd\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127033 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovnkube-script-lib\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127059 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovnkube-config\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127056 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-kubelet\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127110 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-log-socket\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127164 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127102 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127215 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-slash\") pod \"ovnkube-node-z2jkk\" (UID: 
\"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127217 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-systemd\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127228 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-cni-bin\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127233 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-cni-netd\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127247 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-ovn\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127505 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-node-log\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127277 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-run-ovn\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127566 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvkpw\" (UniqueName: \"kubernetes.io/projected/79913912-1ec6-4538-a4ac-03a7e2ac199f-kube-api-access-wvkpw\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127585 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-node-log\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127611 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-run-netns\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127670 4734 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-run-ovn-kubernetes\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127735 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-env-overrides\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127766 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-var-lib-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127775 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-run-ovn-kubernetes\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127801 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127804 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-run-netns\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127836 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-etc-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127874 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-etc-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127935 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127942 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-var-lib-openvswitch\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.127976 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-systemd-units\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.128038 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/79913912-1ec6-4538-a4ac-03a7e2ac199f-systemd-units\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.128172 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-env-overrides\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.128419 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovnkube-script-lib\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.129411 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovnkube-config\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.130721 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/79913912-1ec6-4538-a4ac-03a7e2ac199f-ovn-node-metrics-cert\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.145367 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvkpw\" (UniqueName: \"kubernetes.io/projected/79913912-1ec6-4538-a4ac-03a7e2ac199f-kube-api-access-wvkpw\") pod \"ovnkube-node-z2jkk\" (UID: \"79913912-1ec6-4538-a4ac-03a7e2ac199f\") " pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.220866 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:40:59 crc kubenswrapper[4734]: W1125 09:40:59.238105 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79913912_1ec6_4538_a4ac_03a7e2ac199f.slice/crio-b95b01fa92acaf0a6721804655b999922d7b2b57445bc34105b8fe9675187dcf WatchSource:0}: Error finding container b95b01fa92acaf0a6721804655b999922d7b2b57445bc34105b8fe9675187dcf: Status 404 returned error can't find the container with id b95b01fa92acaf0a6721804655b999922d7b2b57445bc34105b8fe9675187dcf Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.831289 4734 generic.go:334] "Generic (PLEG): container finished" podID="79913912-1ec6-4538-a4ac-03a7e2ac199f" containerID="6813b2c1d917cbdb12bbaea955d92cbd56b0d8e8ae98f9de1620585ad9d57674" exitCode=0 Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.831565 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerDied","Data":"6813b2c1d917cbdb12bbaea955d92cbd56b0d8e8ae98f9de1620585ad9d57674"} Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.831593 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"b95b01fa92acaf0a6721804655b999922d7b2b57445bc34105b8fe9675187dcf"} Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.835989 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-7t7mh_80259512-c4ac-4362-b21e-386796e31645/kube-multus/2.log" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.836136 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-7t7mh" event={"ID":"80259512-c4ac-4362-b21e-386796e31645","Type":"ContainerStarted","Data":"b9e6eb03555a8d0889a9ff5d4f5a6fd791865cff999f5879b6c2dd1a6c95f47a"} Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.841877 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovn-acl-logging/0.log" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.842471 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-2vvjj_cbdcaaef-9e1d-421f-b1fa-05223f0067af/ovn-controller/0.log" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.843049 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" event={"ID":"cbdcaaef-9e1d-421f-b1fa-05223f0067af","Type":"ContainerDied","Data":"931bde0760310df67498c1541e9070111f4471c0e8a1ee56c2291367dd19dace"} Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.843100 4734 scope.go:117] "RemoveContainer" containerID="43fd484a9960544cf74472150ca51b7c7efdc7a3c46fe5c2f855fde6e0cc5b04" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.843255 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-2vvjj" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.899370 4734 scope.go:117] "RemoveContainer" containerID="37a4b8e196e43c48dbf1635501a5190e63b2b95b9c2c22613ec0a4fd61dd729e" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.936164 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2vvjj"] Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.939288 4734 scope.go:117] "RemoveContainer" containerID="88d5d8c99ece7b9aa7cf878ad8550de8e3fa07725494984e823e1ddeb0ca3583" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.943182 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-2vvjj"] Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.959332 4734 scope.go:117] "RemoveContainer" containerID="9a3c2adf427dddcd462a0204812e9502f42defd84e21283bfd0f01e2fb5814a8" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.968956 4734 scope.go:117] "RemoveContainer" containerID="2d3ce77d4ebc8f199eadf2eae6646dd22d77550af273de5ea89879e9fcec6f40" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.979353 4734 scope.go:117] "RemoveContainer" containerID="c100eb688b04ea5f0352c9715e48e4d1ff854af608a36b650db5e114375fe778" Nov 25 09:40:59 crc kubenswrapper[4734]: I1125 09:40:59.991073 4734 scope.go:117] "RemoveContainer" containerID="834e8d341f9037ed72792983e66081cfdc0a344cb6e168751bf1856e59c9b864" Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.004013 4734 scope.go:117] "RemoveContainer" containerID="e21fe13995ed1c869f4867e0645a2cfd68849c2fb011e1570da1524b5a7a27d5" Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.026239 4734 scope.go:117] "RemoveContainer" containerID="0436a6c642057d8d96d2096c58a6f7d683a66bfe50ce5b22b77263aedc010122" Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.253707 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbdcaaef-9e1d-421f-b1fa-05223f0067af" path="/var/lib/kubelet/pods/cbdcaaef-9e1d-421f-b1fa-05223f0067af/volumes" Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.853999 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"d2132939421aaa662f282e56db0d133e257d3ae335ba9b78af897d3395e39e3d"} Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.854316 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"c088223dfa50ddd6c0b86c4b1e5d18406c7e26feabb3afd4ca099e68467369b1"} Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.854328 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"7465a7cee091e07128b90e8d6ef21eff647445437f71de5c11084b794a8ddb67"} Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.854336 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"db64ff2237004ae6568794fb82183454da542ecad587e24640381f71e8bbdf91"} Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.854344 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" 
event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"7140ccf43ea4eb45dc9598a03a4c0278619de44626a28611bc5987a6645adb69"} Nov 25 09:41:00 crc kubenswrapper[4734]: I1125 09:41:00.854352 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"093fa7dc5597dfae64c628145b713811d445f45b9fdc084a16f62fcb95acf1b7"} Nov 25 09:41:02 crc kubenswrapper[4734]: I1125 09:41:02.868045 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"761a27ddb01ff39153ac688959c981282062b3c81ca87b51ae984428545c32c9"} Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.886757 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" event={"ID":"79913912-1ec6-4538-a4ac-03a7e2ac199f","Type":"ContainerStarted","Data":"25be7b2d3fd68e29840fe62949242174ad17d63b1f8f51cb502250ef1f785c20"} Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.887208 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.887249 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.887261 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.916896 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.917066 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:41:05 crc kubenswrapper[4734]: I1125 09:41:05.918547 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" podStartSLOduration=7.918534124 podStartE2EDuration="7.918534124s" podCreationTimestamp="2025-11-25 09:40:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:41:05.91737779 +0000 UTC m=+788.727839784" watchObservedRunningTime="2025-11-25 09:41:05.918534124 +0000 UTC m=+788.728996108" Nov 25 09:41:20 crc kubenswrapper[4734]: I1125 09:41:20.696033 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:41:20 crc kubenswrapper[4734]: I1125 09:41:20.696558 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:41:29 crc kubenswrapper[4734]: I1125 09:41:29.241529 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z2jkk" Nov 25 09:41:31 
crc kubenswrapper[4734]: I1125 09:41:31.177938 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-xfcmm"] Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.179160 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.183427 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-zhd86" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.183611 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.184267 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.235693 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-xfcmm"] Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.265124 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhfwj\" (UniqueName: \"kubernetes.io/projected/8fdf2494-680f-45c6-8357-749ed30027e2-kube-api-access-vhfwj\") pod \"mariadb-operator-index-xfcmm\" (UID: \"8fdf2494-680f-45c6-8357-749ed30027e2\") " pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.366122 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhfwj\" (UniqueName: \"kubernetes.io/projected/8fdf2494-680f-45c6-8357-749ed30027e2-kube-api-access-vhfwj\") pod \"mariadb-operator-index-xfcmm\" (UID: \"8fdf2494-680f-45c6-8357-749ed30027e2\") " pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.383623 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhfwj\" (UniqueName: \"kubernetes.io/projected/8fdf2494-680f-45c6-8357-749ed30027e2-kube-api-access-vhfwj\") pod \"mariadb-operator-index-xfcmm\" (UID: \"8fdf2494-680f-45c6-8357-749ed30027e2\") " pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.497973 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.919131 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-xfcmm"] Nov 25 09:41:31 crc kubenswrapper[4734]: I1125 09:41:31.941042 4734 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:41:32 crc kubenswrapper[4734]: I1125 09:41:32.075372 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-xfcmm" event={"ID":"8fdf2494-680f-45c6-8357-749ed30027e2","Type":"ContainerStarted","Data":"9939891b6d5b300f2b632b47701e3f2eb8471011aa0b5fb76b0e87b38949e021"} Nov 25 09:41:33 crc kubenswrapper[4734]: I1125 09:41:33.081398 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-xfcmm" event={"ID":"8fdf2494-680f-45c6-8357-749ed30027e2","Type":"ContainerStarted","Data":"815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f"} Nov 25 09:41:33 crc kubenswrapper[4734]: I1125 09:41:33.099762 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-xfcmm" podStartSLOduration=1.330157233 podStartE2EDuration="2.099736953s" podCreationTimestamp="2025-11-25 09:41:31 +0000 UTC" firstStartedPulling="2025-11-25 09:41:31.9408528 +0000 UTC m=+814.751314794" lastFinishedPulling="2025-11-25 09:41:32.71043252 +0000 UTC m=+815.520894514" observedRunningTime="2025-11-25 09:41:33.095568871 +0000 UTC m=+815.906030865" watchObservedRunningTime="2025-11-25 09:41:33.099736953 +0000 UTC m=+815.910198957" Nov 25 09:41:34 crc kubenswrapper[4734]: I1125 09:41:34.148770 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-xfcmm"] Nov 25 09:41:34 crc kubenswrapper[4734]: I1125 09:41:34.752569 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-index-qkvtg"] Nov 25 09:41:34 crc kubenswrapper[4734]: I1125 09:41:34.753362 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:34 crc kubenswrapper[4734]: I1125 09:41:34.762388 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-qkvtg"] Nov 25 09:41:34 crc kubenswrapper[4734]: I1125 09:41:34.909816 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvdzf\" (UniqueName: \"kubernetes.io/projected/7ec39d55-38e5-41fc-a8ab-91b874879bd9-kube-api-access-kvdzf\") pod \"mariadb-operator-index-qkvtg\" (UID: \"7ec39d55-38e5-41fc-a8ab-91b874879bd9\") " pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:35 crc kubenswrapper[4734]: I1125 09:41:35.011812 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvdzf\" (UniqueName: \"kubernetes.io/projected/7ec39d55-38e5-41fc-a8ab-91b874879bd9-kube-api-access-kvdzf\") pod \"mariadb-operator-index-qkvtg\" (UID: \"7ec39d55-38e5-41fc-a8ab-91b874879bd9\") " pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:35 crc kubenswrapper[4734]: I1125 09:41:35.042954 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvdzf\" (UniqueName: \"kubernetes.io/projected/7ec39d55-38e5-41fc-a8ab-91b874879bd9-kube-api-access-kvdzf\") pod \"mariadb-operator-index-qkvtg\" (UID: \"7ec39d55-38e5-41fc-a8ab-91b874879bd9\") " pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:35 crc kubenswrapper[4734]: I1125 09:41:35.070160 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:35 crc kubenswrapper[4734]: I1125 09:41:35.101830 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/mariadb-operator-index-xfcmm" podUID="8fdf2494-680f-45c6-8357-749ed30027e2" containerName="registry-server" containerID="cri-o://815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f" gracePeriod=2 Nov 25 09:41:35 crc kubenswrapper[4734]: I1125 09:41:35.532189 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-index-qkvtg"] Nov 25 09:41:35 crc kubenswrapper[4734]: W1125 09:41:35.542136 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ec39d55_38e5_41fc_a8ab_91b874879bd9.slice/crio-9e39c7af22900a0a505f0005f1a017bd593e39d7c5d803914c53d2f2baf2cb66 WatchSource:0}: Error finding container 9e39c7af22900a0a505f0005f1a017bd593e39d7c5d803914c53d2f2baf2cb66: Status 404 returned error can't find the container with id 9e39c7af22900a0a505f0005f1a017bd593e39d7c5d803914c53d2f2baf2cb66 Nov 25 09:41:35 crc kubenswrapper[4734]: I1125 09:41:35.932237 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.027730 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhfwj\" (UniqueName: \"kubernetes.io/projected/8fdf2494-680f-45c6-8357-749ed30027e2-kube-api-access-vhfwj\") pod \"8fdf2494-680f-45c6-8357-749ed30027e2\" (UID: \"8fdf2494-680f-45c6-8357-749ed30027e2\") " Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.033787 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fdf2494-680f-45c6-8357-749ed30027e2-kube-api-access-vhfwj" (OuterVolumeSpecName: "kube-api-access-vhfwj") pod "8fdf2494-680f-45c6-8357-749ed30027e2" (UID: "8fdf2494-680f-45c6-8357-749ed30027e2"). InnerVolumeSpecName "kube-api-access-vhfwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.108431 4734 generic.go:334] "Generic (PLEG): container finished" podID="8fdf2494-680f-45c6-8357-749ed30027e2" containerID="815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f" exitCode=0 Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.108524 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-xfcmm" event={"ID":"8fdf2494-680f-45c6-8357-749ed30027e2","Type":"ContainerDied","Data":"815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f"} Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.108567 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-xfcmm" event={"ID":"8fdf2494-680f-45c6-8357-749ed30027e2","Type":"ContainerDied","Data":"9939891b6d5b300f2b632b47701e3f2eb8471011aa0b5fb76b0e87b38949e021"} Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.108591 4734 scope.go:117] "RemoveContainer" containerID="815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.108526 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-index-xfcmm" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.109688 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-qkvtg" event={"ID":"7ec39d55-38e5-41fc-a8ab-91b874879bd9","Type":"ContainerStarted","Data":"9e39c7af22900a0a505f0005f1a017bd593e39d7c5d803914c53d2f2baf2cb66"} Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.128998 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhfwj\" (UniqueName: \"kubernetes.io/projected/8fdf2494-680f-45c6-8357-749ed30027e2-kube-api-access-vhfwj\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.132989 4734 scope.go:117] "RemoveContainer" containerID="815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f" Nov 25 09:41:36 crc kubenswrapper[4734]: E1125 09:41:36.133808 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f\": container with ID starting with 815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f not found: ID does not exist" containerID="815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.133899 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f"} err="failed to get container status \"815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f\": rpc error: code = NotFound desc = could not find container \"815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f\": container with ID starting with 815c45390eef131520206318a073c6717a9e89710ca4ac6ae5a2d0a19dc05b7f not found: ID does not exist" Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.143227 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/mariadb-operator-index-xfcmm"] Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.145946 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/mariadb-operator-index-xfcmm"] Nov 25 09:41:36 crc kubenswrapper[4734]: I1125 09:41:36.255789 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fdf2494-680f-45c6-8357-749ed30027e2" path="/var/lib/kubelet/pods/8fdf2494-680f-45c6-8357-749ed30027e2/volumes" Nov 25 09:41:37 crc kubenswrapper[4734]: I1125 09:41:37.118374 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-index-qkvtg" event={"ID":"7ec39d55-38e5-41fc-a8ab-91b874879bd9","Type":"ContainerStarted","Data":"c455edd40d0500af07b7383dbb05a3ddd154591ce799c5db701af3863d5546b1"} Nov 25 09:41:37 crc kubenswrapper[4734]: I1125 09:41:37.136926 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-index-qkvtg" podStartSLOduration=2.487411743 podStartE2EDuration="3.136908222s" podCreationTimestamp="2025-11-25 09:41:34 +0000 UTC" firstStartedPulling="2025-11-25 09:41:35.545895484 +0000 UTC m=+818.356357478" lastFinishedPulling="2025-11-25 09:41:36.195391963 +0000 UTC m=+819.005853957" observedRunningTime="2025-11-25 09:41:37.134331166 +0000 UTC m=+819.944793180" watchObservedRunningTime="2025-11-25 09:41:37.136908222 +0000 UTC m=+819.947370216" Nov 25 09:41:45 crc kubenswrapper[4734]: I1125 09:41:45.070774 4734 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:45 crc kubenswrapper[4734]: I1125 09:41:45.071196 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:45 crc kubenswrapper[4734]: I1125 09:41:45.095901 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:45 crc kubenswrapper[4734]: I1125 09:41:45.189983 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-index-qkvtg" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.188114 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m"] Nov 25 09:41:46 crc kubenswrapper[4734]: E1125 09:41:46.188361 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fdf2494-680f-45c6-8357-749ed30027e2" containerName="registry-server" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.188376 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fdf2494-680f-45c6-8357-749ed30027e2" containerName="registry-server" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.188514 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fdf2494-680f-45c6-8357-749ed30027e2" containerName="registry-server" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.189391 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.193676 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9j88k" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.197170 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m"] Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.256711 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-bundle\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.256769 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-util\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.256827 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7qfz\" (UniqueName: \"kubernetes.io/projected/d1b07d68-ac56-4779-a322-25885bec56a4-kube-api-access-p7qfz\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " 
pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.357774 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7qfz\" (UniqueName: \"kubernetes.io/projected/d1b07d68-ac56-4779-a322-25885bec56a4-kube-api-access-p7qfz\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.357895 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-bundle\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.357931 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-util\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.358544 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-util\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.358588 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-bundle\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.380407 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7qfz\" (UniqueName: \"kubernetes.io/projected/d1b07d68-ac56-4779-a322-25885bec56a4-kube-api-access-p7qfz\") pod \"c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.507332 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:46 crc kubenswrapper[4734]: I1125 09:41:46.889171 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m"] Nov 25 09:41:47 crc kubenswrapper[4734]: I1125 09:41:47.178177 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" event={"ID":"d1b07d68-ac56-4779-a322-25885bec56a4","Type":"ContainerStarted","Data":"27a8f861162a929497884eda56d430df030fa067fc72396bbaf25d02c17bfc86"} Nov 25 09:41:47 crc kubenswrapper[4734]: I1125 09:41:47.178596 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" event={"ID":"d1b07d68-ac56-4779-a322-25885bec56a4","Type":"ContainerStarted","Data":"f5f65afe710e70935d03b512ba4be1f559a9dd7628b73788b40e82524851fc08"} Nov 25 09:41:48 crc kubenswrapper[4734]: I1125 09:41:48.185951 4734 generic.go:334] "Generic (PLEG): container finished" podID="d1b07d68-ac56-4779-a322-25885bec56a4" containerID="27a8f861162a929497884eda56d430df030fa067fc72396bbaf25d02c17bfc86" exitCode=0 Nov 25 09:41:48 crc kubenswrapper[4734]: I1125 09:41:48.185990 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" event={"ID":"d1b07d68-ac56-4779-a322-25885bec56a4","Type":"ContainerDied","Data":"27a8f861162a929497884eda56d430df030fa067fc72396bbaf25d02c17bfc86"} Nov 25 09:41:50 crc kubenswrapper[4734]: I1125 09:41:50.696047 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:41:50 crc kubenswrapper[4734]: I1125 09:41:50.696458 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:41:50 crc kubenswrapper[4734]: I1125 09:41:50.696513 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:41:50 crc kubenswrapper[4734]: I1125 09:41:50.697211 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0a9c180e121b5cb8f4c6eca924f66d6e13dbe5f0459c72e12dd199f92062a51f"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:41:50 crc kubenswrapper[4734]: I1125 09:41:50.697278 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://0a9c180e121b5cb8f4c6eca924f66d6e13dbe5f0459c72e12dd199f92062a51f" gracePeriod=600 Nov 25 09:41:51 crc kubenswrapper[4734]: I1125 09:41:51.203570 4734 generic.go:334] "Generic (PLEG): container 
finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="0a9c180e121b5cb8f4c6eca924f66d6e13dbe5f0459c72e12dd199f92062a51f" exitCode=0 Nov 25 09:41:51 crc kubenswrapper[4734]: I1125 09:41:51.203639 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"0a9c180e121b5cb8f4c6eca924f66d6e13dbe5f0459c72e12dd199f92062a51f"} Nov 25 09:41:51 crc kubenswrapper[4734]: I1125 09:41:51.203980 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"3b8d398052ed40cef3b469389e339a6739d1c3bd141a5a0198cc0270e0a8927e"} Nov 25 09:41:51 crc kubenswrapper[4734]: I1125 09:41:51.204014 4734 scope.go:117] "RemoveContainer" containerID="e340e6ae7359f944d27daa5c3a4cb8f7f0ef52834b6c932f518c18e151b43653" Nov 25 09:41:51 crc kubenswrapper[4734]: I1125 09:41:51.206172 4734 generic.go:334] "Generic (PLEG): container finished" podID="d1b07d68-ac56-4779-a322-25885bec56a4" containerID="6cf70e4ad1ad4325fbeb55eac62e140a082ca323cc9f8ae111f481698cc91925" exitCode=0 Nov 25 09:41:51 crc kubenswrapper[4734]: I1125 09:41:51.206197 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" event={"ID":"d1b07d68-ac56-4779-a322-25885bec56a4","Type":"ContainerDied","Data":"6cf70e4ad1ad4325fbeb55eac62e140a082ca323cc9f8ae111f481698cc91925"} Nov 25 09:41:52 crc kubenswrapper[4734]: I1125 09:41:52.218413 4734 generic.go:334] "Generic (PLEG): container finished" podID="d1b07d68-ac56-4779-a322-25885bec56a4" containerID="85eb056f5d5eb7c0c1960dde7b0fcc788638f80b29f5b99db034877b2626068a" exitCode=0 Nov 25 09:41:52 crc kubenswrapper[4734]: I1125 09:41:52.218453 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" event={"ID":"d1b07d68-ac56-4779-a322-25885bec56a4","Type":"ContainerDied","Data":"85eb056f5d5eb7c0c1960dde7b0fcc788638f80b29f5b99db034877b2626068a"} Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.492533 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.577465 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-bundle\") pod \"d1b07d68-ac56-4779-a322-25885bec56a4\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.577710 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-util\") pod \"d1b07d68-ac56-4779-a322-25885bec56a4\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.577810 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7qfz\" (UniqueName: \"kubernetes.io/projected/d1b07d68-ac56-4779-a322-25885bec56a4-kube-api-access-p7qfz\") pod \"d1b07d68-ac56-4779-a322-25885bec56a4\" (UID: \"d1b07d68-ac56-4779-a322-25885bec56a4\") " Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.580844 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-bundle" (OuterVolumeSpecName: "bundle") pod "d1b07d68-ac56-4779-a322-25885bec56a4" (UID: "d1b07d68-ac56-4779-a322-25885bec56a4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.584211 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1b07d68-ac56-4779-a322-25885bec56a4-kube-api-access-p7qfz" (OuterVolumeSpecName: "kube-api-access-p7qfz") pod "d1b07d68-ac56-4779-a322-25885bec56a4" (UID: "d1b07d68-ac56-4779-a322-25885bec56a4"). InnerVolumeSpecName "kube-api-access-p7qfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.587759 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-util" (OuterVolumeSpecName: "util") pod "d1b07d68-ac56-4779-a322-25885bec56a4" (UID: "d1b07d68-ac56-4779-a322-25885bec56a4"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.679512 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7qfz\" (UniqueName: \"kubernetes.io/projected/d1b07d68-ac56-4779-a322-25885bec56a4-kube-api-access-p7qfz\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.679570 4734 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:53 crc kubenswrapper[4734]: I1125 09:41:53.679591 4734 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d1b07d68-ac56-4779-a322-25885bec56a4-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:41:54 crc kubenswrapper[4734]: I1125 09:41:54.231281 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" event={"ID":"d1b07d68-ac56-4779-a322-25885bec56a4","Type":"ContainerDied","Data":"f5f65afe710e70935d03b512ba4be1f559a9dd7628b73788b40e82524851fc08"} Nov 25 09:41:54 crc kubenswrapper[4734]: I1125 09:41:54.231325 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5f65afe710e70935d03b512ba4be1f559a9dd7628b73788b40e82524851fc08" Nov 25 09:41:54 crc kubenswrapper[4734]: I1125 09:41:54.231335 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/c976308faac62824ee875fa80dce4db57a79e32adb8a627dd31cdf72f6c9q8m" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.297388 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8"] Nov 25 09:41:59 crc kubenswrapper[4734]: E1125 09:41:59.298014 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="pull" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.298036 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="pull" Nov 25 09:41:59 crc kubenswrapper[4734]: E1125 09:41:59.298054 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="util" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.298064 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="util" Nov 25 09:41:59 crc kubenswrapper[4734]: E1125 09:41:59.298103 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="extract" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.298113 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="extract" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.298225 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1b07d68-ac56-4779-a322-25885bec56a4" containerName="extract" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.298698 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.300976 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-92b7n" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.307342 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.307551 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.313201 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8"] Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.471892 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ae285841-6883-40fd-aa4c-dc13e1afdf95-webhook-cert\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.471976 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9vq9\" (UniqueName: \"kubernetes.io/projected/ae285841-6883-40fd-aa4c-dc13e1afdf95-kube-api-access-s9vq9\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.472007 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ae285841-6883-40fd-aa4c-dc13e1afdf95-apiservice-cert\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.573328 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9vq9\" (UniqueName: \"kubernetes.io/projected/ae285841-6883-40fd-aa4c-dc13e1afdf95-kube-api-access-s9vq9\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.573383 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ae285841-6883-40fd-aa4c-dc13e1afdf95-apiservice-cert\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.573458 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ae285841-6883-40fd-aa4c-dc13e1afdf95-webhook-cert\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") 
" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.586117 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ae285841-6883-40fd-aa4c-dc13e1afdf95-webhook-cert\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.586283 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ae285841-6883-40fd-aa4c-dc13e1afdf95-apiservice-cert\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.590350 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9vq9\" (UniqueName: \"kubernetes.io/projected/ae285841-6883-40fd-aa4c-dc13e1afdf95-kube-api-access-s9vq9\") pod \"mariadb-operator-controller-manager-7d8c8fd467-kkqb8\" (UID: \"ae285841-6883-40fd-aa4c-dc13e1afdf95\") " pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.622429 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:41:59 crc kubenswrapper[4734]: I1125 09:41:59.940449 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8"] Nov 25 09:42:00 crc kubenswrapper[4734]: I1125 09:42:00.266767 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerStarted","Data":"760316c7b2bdb09f3bf18db7132f92fb25d20004778988d7d6d569be02e063a9"} Nov 25 09:42:04 crc kubenswrapper[4734]: I1125 09:42:04.323566 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerStarted","Data":"cb4bfa871f9f584476931d0e330c8b0d41e81993189fdcea0d71d5ed616d55dd"} Nov 25 09:42:04 crc kubenswrapper[4734]: I1125 09:42:04.324149 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:42:04 crc kubenswrapper[4734]: I1125 09:42:04.341353 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" podStartSLOduration=2.03836446 podStartE2EDuration="5.34133758s" podCreationTimestamp="2025-11-25 09:41:59 +0000 UTC" firstStartedPulling="2025-11-25 09:41:59.951800478 +0000 UTC m=+842.762262472" lastFinishedPulling="2025-11-25 09:42:03.254773598 +0000 UTC m=+846.065235592" observedRunningTime="2025-11-25 09:42:04.337657663 +0000 UTC m=+847.148119657" watchObservedRunningTime="2025-11-25 09:42:04.34133758 +0000 UTC m=+847.151799574" Nov 25 09:42:09 crc kubenswrapper[4734]: I1125 09:42:09.628875 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.518851 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w"] Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.520343 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.522330 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.532368 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w"] Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.683830 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.683927 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttvxg\" (UniqueName: \"kubernetes.io/projected/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-kube-api-access-ttvxg\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.683986 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.784842 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttvxg\" (UniqueName: \"kubernetes.io/projected/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-kube-api-access-ttvxg\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.784920 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.784963 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-bundle\") pod 
\"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.785564 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.785617 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.803479 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttvxg\" (UniqueName: \"kubernetes.io/projected/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-kube-api-access-ttvxg\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:15 crc kubenswrapper[4734]: I1125 09:42:15.840306 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:16 crc kubenswrapper[4734]: I1125 09:42:16.229523 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w"] Nov 25 09:42:16 crc kubenswrapper[4734]: I1125 09:42:16.389886 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" event={"ID":"f1910d43-3749-46f2-9ea2-9c3a75c6bda6","Type":"ContainerStarted","Data":"71bfd625da5e2d1e1396e057d9f3af32c83df2371602098b2a3c5a01cb7d64d2"} Nov 25 09:42:17 crc kubenswrapper[4734]: I1125 09:42:17.396567 4734 generic.go:334] "Generic (PLEG): container finished" podID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerID="5bedee460ecb72acf666280cb34b82cd9e34db60b5f0577457f18b6ad7d8fec2" exitCode=0 Nov 25 09:42:17 crc kubenswrapper[4734]: I1125 09:42:17.396877 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" event={"ID":"f1910d43-3749-46f2-9ea2-9c3a75c6bda6","Type":"ContainerDied","Data":"5bedee460ecb72acf666280cb34b82cd9e34db60b5f0577457f18b6ad7d8fec2"} Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.082240 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9h4b5"] Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.083821 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.101004 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9h4b5"] Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.226188 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jnnw\" (UniqueName: \"kubernetes.io/projected/be33ef05-1f40-4843-9c35-5d74df09ce71-kube-api-access-9jnnw\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.226261 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-utilities\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.226313 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-catalog-content\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.327545 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jnnw\" (UniqueName: \"kubernetes.io/projected/be33ef05-1f40-4843-9c35-5d74df09ce71-kube-api-access-9jnnw\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.327598 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-utilities\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.327634 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-catalog-content\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.328282 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-utilities\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.328297 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-catalog-content\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.351932 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9jnnw\" (UniqueName: \"kubernetes.io/projected/be33ef05-1f40-4843-9c35-5d74df09ce71-kube-api-access-9jnnw\") pod \"redhat-operators-9h4b5\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.411806 4734 generic.go:334] "Generic (PLEG): container finished" podID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerID="b514c2adffdc4cd9752954e7c71dfa10c5e39a6a28c79aadec606c9808034175" exitCode=0 Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.411845 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" event={"ID":"f1910d43-3749-46f2-9ea2-9c3a75c6bda6","Type":"ContainerDied","Data":"b514c2adffdc4cd9752954e7c71dfa10c5e39a6a28c79aadec606c9808034175"} Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.416838 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:19 crc kubenswrapper[4734]: I1125 09:42:19.863427 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9h4b5"] Nov 25 09:42:20 crc kubenswrapper[4734]: I1125 09:42:20.417039 4734 generic.go:334] "Generic (PLEG): container finished" podID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerID="2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2" exitCode=0 Nov 25 09:42:20 crc kubenswrapper[4734]: I1125 09:42:20.417138 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerDied","Data":"2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2"} Nov 25 09:42:20 crc kubenswrapper[4734]: I1125 09:42:20.417455 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerStarted","Data":"647230a6686ea3818deed395b9436a09b6f947ac5f65efbaa3450f9ded4ece76"} Nov 25 09:42:20 crc kubenswrapper[4734]: I1125 09:42:20.420509 4734 generic.go:334] "Generic (PLEG): container finished" podID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerID="835c180b95a4a448287f117f9616e51aaee466b9c6bd4eb223313ba68c7430da" exitCode=0 Nov 25 09:42:20 crc kubenswrapper[4734]: I1125 09:42:20.420545 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" event={"ID":"f1910d43-3749-46f2-9ea2-9c3a75c6bda6","Type":"ContainerDied","Data":"835c180b95a4a448287f117f9616e51aaee466b9c6bd4eb223313ba68c7430da"} Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.428187 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerStarted","Data":"ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857"} Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.684971 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.859672 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttvxg\" (UniqueName: \"kubernetes.io/projected/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-kube-api-access-ttvxg\") pod \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.859751 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-util\") pod \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.859779 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-bundle\") pod \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\" (UID: \"f1910d43-3749-46f2-9ea2-9c3a75c6bda6\") " Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.862384 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-bundle" (OuterVolumeSpecName: "bundle") pod "f1910d43-3749-46f2-9ea2-9c3a75c6bda6" (UID: "f1910d43-3749-46f2-9ea2-9c3a75c6bda6"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.867892 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-kube-api-access-ttvxg" (OuterVolumeSpecName: "kube-api-access-ttvxg") pod "f1910d43-3749-46f2-9ea2-9c3a75c6bda6" (UID: "f1910d43-3749-46f2-9ea2-9c3a75c6bda6"). InnerVolumeSpecName "kube-api-access-ttvxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.962834 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttvxg\" (UniqueName: \"kubernetes.io/projected/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-kube-api-access-ttvxg\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.963093 4734 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:21 crc kubenswrapper[4734]: I1125 09:42:21.995942 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-util" (OuterVolumeSpecName: "util") pod "f1910d43-3749-46f2-9ea2-9c3a75c6bda6" (UID: "f1910d43-3749-46f2-9ea2-9c3a75c6bda6"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:22 crc kubenswrapper[4734]: I1125 09:42:22.064736 4734 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f1910d43-3749-46f2-9ea2-9c3a75c6bda6-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:22 crc kubenswrapper[4734]: I1125 09:42:22.434477 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" event={"ID":"f1910d43-3749-46f2-9ea2-9c3a75c6bda6","Type":"ContainerDied","Data":"71bfd625da5e2d1e1396e057d9f3af32c83df2371602098b2a3c5a01cb7d64d2"} Nov 25 09:42:22 crc kubenswrapper[4734]: I1125 09:42:22.434519 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71bfd625da5e2d1e1396e057d9f3af32c83df2371602098b2a3c5a01cb7d64d2" Nov 25 09:42:22 crc kubenswrapper[4734]: I1125 09:42:22.434659 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6mff8w" Nov 25 09:42:22 crc kubenswrapper[4734]: I1125 09:42:22.435921 4734 generic.go:334] "Generic (PLEG): container finished" podID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerID="ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857" exitCode=0 Nov 25 09:42:22 crc kubenswrapper[4734]: I1125 09:42:22.435950 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerDied","Data":"ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857"} Nov 25 09:42:23 crc kubenswrapper[4734]: I1125 09:42:23.445797 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerStarted","Data":"1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683"} Nov 25 09:42:23 crc kubenswrapper[4734]: I1125 09:42:23.465897 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9h4b5" podStartSLOduration=2.051190937 podStartE2EDuration="4.465881259s" podCreationTimestamp="2025-11-25 09:42:19 +0000 UTC" firstStartedPulling="2025-11-25 09:42:20.418716855 +0000 UTC m=+863.229178849" lastFinishedPulling="2025-11-25 09:42:22.833407177 +0000 UTC m=+865.643869171" observedRunningTime="2025-11-25 09:42:23.46315854 +0000 UTC m=+866.273620534" watchObservedRunningTime="2025-11-25 09:42:23.465881259 +0000 UTC m=+866.276343253" Nov 25 09:42:29 crc kubenswrapper[4734]: I1125 09:42:29.417674 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:29 crc kubenswrapper[4734]: I1125 09:42:29.418035 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:29 crc kubenswrapper[4734]: I1125 09:42:29.455733 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:29 crc kubenswrapper[4734]: I1125 09:42:29.535482 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.074836 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-9h4b5"] Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.075327 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9h4b5" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="registry-server" containerID="cri-o://1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683" gracePeriod=2 Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.473317 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.492264 4734 generic.go:334] "Generic (PLEG): container finished" podID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerID="1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683" exitCode=0 Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.492312 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerDied","Data":"1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683"} Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.492347 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9h4b5" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.492376 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9h4b5" event={"ID":"be33ef05-1f40-4843-9c35-5d74df09ce71","Type":"ContainerDied","Data":"647230a6686ea3818deed395b9436a09b6f947ac5f65efbaa3450f9ded4ece76"} Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.492396 4734 scope.go:117] "RemoveContainer" containerID="1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.514458 4734 scope.go:117] "RemoveContainer" containerID="ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.539956 4734 scope.go:117] "RemoveContainer" containerID="2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.568288 4734 scope.go:117] "RemoveContainer" containerID="1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.568811 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683\": container with ID starting with 1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683 not found: ID does not exist" containerID="1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.568856 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683"} err="failed to get container status \"1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683\": rpc error: code = NotFound desc = could not find container \"1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683\": container with ID starting with 1ad24fdccd4d75ec2a922414a5888237eda339d8de39df70ea8b835133dff683 not found: ID does not exist" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.568884 4734 
scope.go:117] "RemoveContainer" containerID="ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.569516 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857\": container with ID starting with ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857 not found: ID does not exist" containerID="ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.569546 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857"} err="failed to get container status \"ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857\": rpc error: code = NotFound desc = could not find container \"ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857\": container with ID starting with ba611c034e6b818009ded301ffdbecbd21fb34cabb9160ca2f1997c66aa24857 not found: ID does not exist" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.569562 4734 scope.go:117] "RemoveContainer" containerID="2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.569903 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2\": container with ID starting with 2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2 not found: ID does not exist" containerID="2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.569924 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2"} err="failed to get container status \"2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2\": rpc error: code = NotFound desc = could not find container \"2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2\": container with ID starting with 2fdb42a9b3ee4f5398742d9ba83255a898d885002a4b0d760a8ae75e288902d2 not found: ID does not exist" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.598336 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jnnw\" (UniqueName: \"kubernetes.io/projected/be33ef05-1f40-4843-9c35-5d74df09ce71-kube-api-access-9jnnw\") pod \"be33ef05-1f40-4843-9c35-5d74df09ce71\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.598467 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-catalog-content\") pod \"be33ef05-1f40-4843-9c35-5d74df09ce71\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.598518 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-utilities\") pod \"be33ef05-1f40-4843-9c35-5d74df09ce71\" (UID: \"be33ef05-1f40-4843-9c35-5d74df09ce71\") " Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.599655 4734 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-utilities" (OuterVolumeSpecName: "utilities") pod "be33ef05-1f40-4843-9c35-5d74df09ce71" (UID: "be33ef05-1f40-4843-9c35-5d74df09ce71"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.608651 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be33ef05-1f40-4843-9c35-5d74df09ce71-kube-api-access-9jnnw" (OuterVolumeSpecName: "kube-api-access-9jnnw") pod "be33ef05-1f40-4843-9c35-5d74df09ce71" (UID: "be33ef05-1f40-4843-9c35-5d74df09ce71"). InnerVolumeSpecName "kube-api-access-9jnnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.697297 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be33ef05-1f40-4843-9c35-5d74df09ce71" (UID: "be33ef05-1f40-4843-9c35-5d74df09ce71"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.699862 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f"] Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700064 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jnnw\" (UniqueName: \"kubernetes.io/projected/be33ef05-1f40-4843-9c35-5d74df09ce71-kube-api-access-9jnnw\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700352 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700379 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be33ef05-1f40-4843-9c35-5d74df09ce71-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.700116 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="pull" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700403 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="pull" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.700416 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="util" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700423 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="util" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.700435 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="registry-server" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700445 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="registry-server" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.700460 4734 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="extract-utilities" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700469 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="extract-utilities" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.700483 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="extract-content" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700490 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="extract-content" Nov 25 09:42:32 crc kubenswrapper[4734]: E1125 09:42:32.700503 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="extract" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700511 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="extract" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700639 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1910d43-3749-46f2-9ea2-9c3a75c6bda6" containerName="extract" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.700656 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" containerName="registry-server" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.701071 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.703156 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.704464 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.704822 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-r68k6" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.705020 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.718244 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.720230 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f"] Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.800978 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-apiservice-cert\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.801038 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmccf\" (UniqueName: \"kubernetes.io/projected/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-kube-api-access-tmccf\") pod 
\"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.801064 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-webhook-cert\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.823427 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9h4b5"] Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.831984 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9h4b5"] Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.916321 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-apiservice-cert\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.916385 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmccf\" (UniqueName: \"kubernetes.io/projected/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-kube-api-access-tmccf\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.916420 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-webhook-cert\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.920303 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-apiservice-cert\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.926583 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-webhook-cert\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.949842 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmccf\" (UniqueName: \"kubernetes.io/projected/e96f81f5-7d7b-4580-9174-f5b46d5f1ea6-kube-api-access-tmccf\") pod \"metallb-operator-controller-manager-664f78b8b-n7g4f\" (UID: \"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6\") " 
pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.997153 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c"] Nov 25 09:42:32 crc kubenswrapper[4734]: I1125 09:42:32.997986 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.000034 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-j9gqb" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.000999 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.002270 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.017183 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c"] Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.021669 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.119069 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-webhook-cert\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.119179 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-apiservice-cert\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.119211 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9tkb\" (UniqueName: \"kubernetes.io/projected/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-kube-api-access-b9tkb\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.219971 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-webhook-cert\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.220042 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-apiservice-cert\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: 
\"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.220065 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9tkb\" (UniqueName: \"kubernetes.io/projected/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-kube-api-access-b9tkb\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.225471 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-apiservice-cert\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.236723 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-webhook-cert\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.237010 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9tkb\" (UniqueName: \"kubernetes.io/projected/d55bf784-99e5-42b2-8ec3-8dbf3d3aa547-kube-api-access-b9tkb\") pod \"metallb-operator-webhook-server-75bd4c87d6-fr76c\" (UID: \"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547\") " pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.280830 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f"] Nov 25 09:42:33 crc kubenswrapper[4734]: W1125 09:42:33.289146 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode96f81f5_7d7b_4580_9174_f5b46d5f1ea6.slice/crio-feb39ce2019bc18f4f233e39bbfd637180dbebe08975d6bb612e3fb1cd9a6445 WatchSource:0}: Error finding container feb39ce2019bc18f4f233e39bbfd637180dbebe08975d6bb612e3fb1cd9a6445: Status 404 returned error can't find the container with id feb39ce2019bc18f4f233e39bbfd637180dbebe08975d6bb612e3fb1cd9a6445 Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.314207 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.501140 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" event={"ID":"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6","Type":"ContainerStarted","Data":"feb39ce2019bc18f4f233e39bbfd637180dbebe08975d6bb612e3fb1cd9a6445"} Nov 25 09:42:33 crc kubenswrapper[4734]: I1125 09:42:33.565682 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c"] Nov 25 09:42:33 crc kubenswrapper[4734]: W1125 09:42:33.585761 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd55bf784_99e5_42b2_8ec3_8dbf3d3aa547.slice/crio-bc7f523de9a43230f27c5045964ca581128b9a40dd6b934fcd665495d2392430 WatchSource:0}: Error finding container bc7f523de9a43230f27c5045964ca581128b9a40dd6b934fcd665495d2392430: Status 404 returned error can't find the container with id bc7f523de9a43230f27c5045964ca581128b9a40dd6b934fcd665495d2392430 Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.082059 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bt4z2"] Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.083561 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.102435 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bt4z2"] Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.232400 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-utilities\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.235116 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-catalog-content\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.235761 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8pnj\" (UniqueName: \"kubernetes.io/projected/0ad4be77-3472-41a6-9696-29f24eb248be-kube-api-access-q8pnj\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.254047 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be33ef05-1f40-4843-9c35-5d74df09ce71" path="/var/lib/kubelet/pods/be33ef05-1f40-4843-9c35-5d74df09ce71/volumes" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.337195 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-utilities\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " 
pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.337245 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-catalog-content\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.337284 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8pnj\" (UniqueName: \"kubernetes.io/projected/0ad4be77-3472-41a6-9696-29f24eb248be-kube-api-access-q8pnj\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.338118 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-utilities\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.338201 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-catalog-content\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.372448 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8pnj\" (UniqueName: \"kubernetes.io/projected/0ad4be77-3472-41a6-9696-29f24eb248be-kube-api-access-q8pnj\") pod \"community-operators-bt4z2\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.399974 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.517279 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" event={"ID":"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547","Type":"ContainerStarted","Data":"bc7f523de9a43230f27c5045964ca581128b9a40dd6b934fcd665495d2392430"} Nov 25 09:42:34 crc kubenswrapper[4734]: I1125 09:42:34.897684 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bt4z2"] Nov 25 09:42:34 crc kubenswrapper[4734]: W1125 09:42:34.917836 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ad4be77_3472_41a6_9696_29f24eb248be.slice/crio-122722e2e3fc726f0eed2e9b41005adde25c261401c0cc933c5d5c14270c921b WatchSource:0}: Error finding container 122722e2e3fc726f0eed2e9b41005adde25c261401c0cc933c5d5c14270c921b: Status 404 returned error can't find the container with id 122722e2e3fc726f0eed2e9b41005adde25c261401c0cc933c5d5c14270c921b Nov 25 09:42:35 crc kubenswrapper[4734]: I1125 09:42:35.523436 4734 generic.go:334] "Generic (PLEG): container finished" podID="0ad4be77-3472-41a6-9696-29f24eb248be" containerID="55f84f35832beb9984b2bd4c4183762d4899b694ebe0b554a338b7970d7b0cd7" exitCode=0 Nov 25 09:42:35 crc kubenswrapper[4734]: I1125 09:42:35.523695 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bt4z2" event={"ID":"0ad4be77-3472-41a6-9696-29f24eb248be","Type":"ContainerDied","Data":"55f84f35832beb9984b2bd4c4183762d4899b694ebe0b554a338b7970d7b0cd7"} Nov 25 09:42:35 crc kubenswrapper[4734]: I1125 09:42:35.523718 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bt4z2" event={"ID":"0ad4be77-3472-41a6-9696-29f24eb248be","Type":"ContainerStarted","Data":"122722e2e3fc726f0eed2e9b41005adde25c261401c0cc933c5d5c14270c921b"} Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.539695 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" event={"ID":"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6","Type":"ContainerStarted","Data":"a35b7abc65ef7865dc9477490333d7276d0cf7ccdd883315fe1776db28642be6"} Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.540277 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.541898 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" event={"ID":"d55bf784-99e5-42b2-8ec3-8dbf3d3aa547","Type":"ContainerStarted","Data":"5bc977e6a76cc45f8f73872904ebc96a2dfdc91255f224a1472f48b81102485a"} Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.542025 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.543628 4734 generic.go:334] "Generic (PLEG): container finished" podID="0ad4be77-3472-41a6-9696-29f24eb248be" containerID="544af142c8c371d6c344824647851e5725e1c072d2fc2d9aa100bf6cf491866f" exitCode=0 Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.543693 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bt4z2" 
event={"ID":"0ad4be77-3472-41a6-9696-29f24eb248be","Type":"ContainerDied","Data":"544af142c8c371d6c344824647851e5725e1c072d2fc2d9aa100bf6cf491866f"} Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.566626 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" podStartSLOduration=1.863069337 podStartE2EDuration="6.566608905s" podCreationTimestamp="2025-11-25 09:42:32 +0000 UTC" firstStartedPulling="2025-11-25 09:42:33.290752513 +0000 UTC m=+876.101214507" lastFinishedPulling="2025-11-25 09:42:37.994292081 +0000 UTC m=+880.804754075" observedRunningTime="2025-11-25 09:42:38.562605588 +0000 UTC m=+881.373067602" watchObservedRunningTime="2025-11-25 09:42:38.566608905 +0000 UTC m=+881.377070899" Nov 25 09:42:38 crc kubenswrapper[4734]: I1125 09:42:38.600212 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" podStartSLOduration=2.194786081 podStartE2EDuration="6.600194687s" podCreationTimestamp="2025-11-25 09:42:32 +0000 UTC" firstStartedPulling="2025-11-25 09:42:33.590795381 +0000 UTC m=+876.401257375" lastFinishedPulling="2025-11-25 09:42:37.996203987 +0000 UTC m=+880.806665981" observedRunningTime="2025-11-25 09:42:38.596797638 +0000 UTC m=+881.407259632" watchObservedRunningTime="2025-11-25 09:42:38.600194687 +0000 UTC m=+881.410656681" Nov 25 09:42:39 crc kubenswrapper[4734]: I1125 09:42:39.556388 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bt4z2" event={"ID":"0ad4be77-3472-41a6-9696-29f24eb248be","Type":"ContainerStarted","Data":"8e76fb6a2e214acbac8fd404dd9a5eaf844ed20edeae7fbfdc73fa6fc2c093be"} Nov 25 09:42:39 crc kubenswrapper[4734]: I1125 09:42:39.572471 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bt4z2" podStartSLOduration=2.02106848 podStartE2EDuration="5.572450009s" podCreationTimestamp="2025-11-25 09:42:34 +0000 UTC" firstStartedPulling="2025-11-25 09:42:35.525277481 +0000 UTC m=+878.335739475" lastFinishedPulling="2025-11-25 09:42:39.076659 +0000 UTC m=+881.887121004" observedRunningTime="2025-11-25 09:42:39.570149941 +0000 UTC m=+882.380611935" watchObservedRunningTime="2025-11-25 09:42:39.572450009 +0000 UTC m=+882.382912003" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.498368 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-t2kh8"] Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.499615 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.511965 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t2kh8"] Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.617162 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-catalog-content\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.617240 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-utilities\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.617420 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-698c4\" (UniqueName: \"kubernetes.io/projected/f176610e-ee47-4057-bb2b-c01b0867e6f3-kube-api-access-698c4\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.718806 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-catalog-content\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.718874 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-utilities\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.718916 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-698c4\" (UniqueName: \"kubernetes.io/projected/f176610e-ee47-4057-bb2b-c01b0867e6f3-kube-api-access-698c4\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.719382 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-catalog-content\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.719458 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-utilities\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.744243 4734 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-698c4\" (UniqueName: \"kubernetes.io/projected/f176610e-ee47-4057-bb2b-c01b0867e6f3-kube-api-access-698c4\") pod \"redhat-marketplace-t2kh8\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:40 crc kubenswrapper[4734]: I1125 09:42:40.816181 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:41 crc kubenswrapper[4734]: I1125 09:42:41.044884 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-t2kh8"] Nov 25 09:42:41 crc kubenswrapper[4734]: W1125 09:42:41.049264 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf176610e_ee47_4057_bb2b_c01b0867e6f3.slice/crio-07f8ef48cbb7720b655721cd40a251ec47477c9a5d0f8ec21090ca6877f6b6fe WatchSource:0}: Error finding container 07f8ef48cbb7720b655721cd40a251ec47477c9a5d0f8ec21090ca6877f6b6fe: Status 404 returned error can't find the container with id 07f8ef48cbb7720b655721cd40a251ec47477c9a5d0f8ec21090ca6877f6b6fe Nov 25 09:42:41 crc kubenswrapper[4734]: I1125 09:42:41.572056 4734 generic.go:334] "Generic (PLEG): container finished" podID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerID="a7b9371d327ae023357474ac1123aa0777cc2f0b2fb5899c1092645a24d85f13" exitCode=0 Nov 25 09:42:41 crc kubenswrapper[4734]: I1125 09:42:41.572126 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t2kh8" event={"ID":"f176610e-ee47-4057-bb2b-c01b0867e6f3","Type":"ContainerDied","Data":"a7b9371d327ae023357474ac1123aa0777cc2f0b2fb5899c1092645a24d85f13"} Nov 25 09:42:41 crc kubenswrapper[4734]: I1125 09:42:41.572154 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t2kh8" event={"ID":"f176610e-ee47-4057-bb2b-c01b0867e6f3","Type":"ContainerStarted","Data":"07f8ef48cbb7720b655721cd40a251ec47477c9a5d0f8ec21090ca6877f6b6fe"} Nov 25 09:42:43 crc kubenswrapper[4734]: I1125 09:42:43.583348 4734 generic.go:334] "Generic (PLEG): container finished" podID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerID="6c4ff78027a5f4874f0111609eef0d5abf7a672bc02998e8c477cc2f598d3b31" exitCode=0 Nov 25 09:42:43 crc kubenswrapper[4734]: I1125 09:42:43.583422 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t2kh8" event={"ID":"f176610e-ee47-4057-bb2b-c01b0867e6f3","Type":"ContainerDied","Data":"6c4ff78027a5f4874f0111609eef0d5abf7a672bc02998e8c477cc2f598d3b31"} Nov 25 09:42:44 crc kubenswrapper[4734]: I1125 09:42:44.400367 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:44 crc kubenswrapper[4734]: I1125 09:42:44.400697 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:44 crc kubenswrapper[4734]: I1125 09:42:44.451105 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:44 crc kubenswrapper[4734]: I1125 09:42:44.661464 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:46 crc kubenswrapper[4734]: I1125 09:42:46.616182 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-t2kh8" event={"ID":"f176610e-ee47-4057-bb2b-c01b0867e6f3","Type":"ContainerStarted","Data":"f75f086bcf63e2ac1efcde2f51ba50fabb395b807ba6609423776aee1aeb752f"} Nov 25 09:42:46 crc kubenswrapper[4734]: I1125 09:42:46.634108 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-t2kh8" podStartSLOduration=2.527939714 podStartE2EDuration="6.634063634s" podCreationTimestamp="2025-11-25 09:42:40 +0000 UTC" firstStartedPulling="2025-11-25 09:42:41.573803362 +0000 UTC m=+884.384265356" lastFinishedPulling="2025-11-25 09:42:45.679927282 +0000 UTC m=+888.490389276" observedRunningTime="2025-11-25 09:42:46.631078007 +0000 UTC m=+889.441540001" watchObservedRunningTime="2025-11-25 09:42:46.634063634 +0000 UTC m=+889.444525628" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.272187 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bt4z2"] Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.272425 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bt4z2" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="registry-server" containerID="cri-o://8e76fb6a2e214acbac8fd404dd9a5eaf844ed20edeae7fbfdc73fa6fc2c093be" gracePeriod=2 Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.631875 4734 generic.go:334] "Generic (PLEG): container finished" podID="0ad4be77-3472-41a6-9696-29f24eb248be" containerID="8e76fb6a2e214acbac8fd404dd9a5eaf844ed20edeae7fbfdc73fa6fc2c093be" exitCode=0 Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.632068 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bt4z2" event={"ID":"0ad4be77-3472-41a6-9696-29f24eb248be","Type":"ContainerDied","Data":"8e76fb6a2e214acbac8fd404dd9a5eaf844ed20edeae7fbfdc73fa6fc2c093be"} Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.632243 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bt4z2" event={"ID":"0ad4be77-3472-41a6-9696-29f24eb248be","Type":"ContainerDied","Data":"122722e2e3fc726f0eed2e9b41005adde25c261401c0cc933c5d5c14270c921b"} Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.632268 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="122722e2e3fc726f0eed2e9b41005adde25c261401c0cc933c5d5c14270c921b" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.654716 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.822454 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-catalog-content\") pod \"0ad4be77-3472-41a6-9696-29f24eb248be\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.822508 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8pnj\" (UniqueName: \"kubernetes.io/projected/0ad4be77-3472-41a6-9696-29f24eb248be-kube-api-access-q8pnj\") pod \"0ad4be77-3472-41a6-9696-29f24eb248be\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.822586 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-utilities\") pod \"0ad4be77-3472-41a6-9696-29f24eb248be\" (UID: \"0ad4be77-3472-41a6-9696-29f24eb248be\") " Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.823372 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-utilities" (OuterVolumeSpecName: "utilities") pod "0ad4be77-3472-41a6-9696-29f24eb248be" (UID: "0ad4be77-3472-41a6-9696-29f24eb248be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.823650 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.828315 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ad4be77-3472-41a6-9696-29f24eb248be-kube-api-access-q8pnj" (OuterVolumeSpecName: "kube-api-access-q8pnj") pod "0ad4be77-3472-41a6-9696-29f24eb248be" (UID: "0ad4be77-3472-41a6-9696-29f24eb248be"). InnerVolumeSpecName "kube-api-access-q8pnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.884621 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0ad4be77-3472-41a6-9696-29f24eb248be" (UID: "0ad4be77-3472-41a6-9696-29f24eb248be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.924840 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ad4be77-3472-41a6-9696-29f24eb248be-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:48 crc kubenswrapper[4734]: I1125 09:42:48.924882 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8pnj\" (UniqueName: \"kubernetes.io/projected/0ad4be77-3472-41a6-9696-29f24eb248be-kube-api-access-q8pnj\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:49 crc kubenswrapper[4734]: I1125 09:42:49.636428 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bt4z2" Nov 25 09:42:49 crc kubenswrapper[4734]: I1125 09:42:49.664909 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bt4z2"] Nov 25 09:42:49 crc kubenswrapper[4734]: I1125 09:42:49.667262 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bt4z2"] Nov 25 09:42:50 crc kubenswrapper[4734]: I1125 09:42:50.252900 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" path="/var/lib/kubelet/pods/0ad4be77-3472-41a6-9696-29f24eb248be/volumes" Nov 25 09:42:50 crc kubenswrapper[4734]: I1125 09:42:50.817441 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:50 crc kubenswrapper[4734]: I1125 09:42:50.817760 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:50 crc kubenswrapper[4734]: I1125 09:42:50.858767 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:51 crc kubenswrapper[4734]: I1125 09:42:51.683428 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:53 crc kubenswrapper[4734]: I1125 09:42:53.318378 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-75bd4c87d6-fr76c" Nov 25 09:42:54 crc kubenswrapper[4734]: I1125 09:42:54.473365 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t2kh8"] Nov 25 09:42:54 crc kubenswrapper[4734]: I1125 09:42:54.473640 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-t2kh8" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="registry-server" containerID="cri-o://f75f086bcf63e2ac1efcde2f51ba50fabb395b807ba6609423776aee1aeb752f" gracePeriod=2 Nov 25 09:42:54 crc kubenswrapper[4734]: I1125 09:42:54.664217 4734 generic.go:334] "Generic (PLEG): container finished" podID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerID="f75f086bcf63e2ac1efcde2f51ba50fabb395b807ba6609423776aee1aeb752f" exitCode=0 Nov 25 09:42:54 crc kubenswrapper[4734]: I1125 09:42:54.664295 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t2kh8" event={"ID":"f176610e-ee47-4057-bb2b-c01b0867e6f3","Type":"ContainerDied","Data":"f75f086bcf63e2ac1efcde2f51ba50fabb395b807ba6609423776aee1aeb752f"} Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.323265 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.508295 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-utilities\") pod \"f176610e-ee47-4057-bb2b-c01b0867e6f3\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.508390 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-catalog-content\") pod \"f176610e-ee47-4057-bb2b-c01b0867e6f3\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.508466 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-698c4\" (UniqueName: \"kubernetes.io/projected/f176610e-ee47-4057-bb2b-c01b0867e6f3-kube-api-access-698c4\") pod \"f176610e-ee47-4057-bb2b-c01b0867e6f3\" (UID: \"f176610e-ee47-4057-bb2b-c01b0867e6f3\") " Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.509395 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-utilities" (OuterVolumeSpecName: "utilities") pod "f176610e-ee47-4057-bb2b-c01b0867e6f3" (UID: "f176610e-ee47-4057-bb2b-c01b0867e6f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.515216 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f176610e-ee47-4057-bb2b-c01b0867e6f3-kube-api-access-698c4" (OuterVolumeSpecName: "kube-api-access-698c4") pod "f176610e-ee47-4057-bb2b-c01b0867e6f3" (UID: "f176610e-ee47-4057-bb2b-c01b0867e6f3"). InnerVolumeSpecName "kube-api-access-698c4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.526338 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f176610e-ee47-4057-bb2b-c01b0867e6f3" (UID: "f176610e-ee47-4057-bb2b-c01b0867e6f3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.610315 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.610363 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f176610e-ee47-4057-bb2b-c01b0867e6f3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.610380 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-698c4\" (UniqueName: \"kubernetes.io/projected/f176610e-ee47-4057-bb2b-c01b0867e6f3-kube-api-access-698c4\") on node \"crc\" DevicePath \"\"" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.672228 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-t2kh8" event={"ID":"f176610e-ee47-4057-bb2b-c01b0867e6f3","Type":"ContainerDied","Data":"07f8ef48cbb7720b655721cd40a251ec47477c9a5d0f8ec21090ca6877f6b6fe"} Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.672287 4734 scope.go:117] "RemoveContainer" containerID="f75f086bcf63e2ac1efcde2f51ba50fabb395b807ba6609423776aee1aeb752f" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.672288 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-t2kh8" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.697666 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-t2kh8"] Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.702518 4734 scope.go:117] "RemoveContainer" containerID="6c4ff78027a5f4874f0111609eef0d5abf7a672bc02998e8c477cc2f598d3b31" Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.706893 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-t2kh8"] Nov 25 09:42:55 crc kubenswrapper[4734]: I1125 09:42:55.769548 4734 scope.go:117] "RemoveContainer" containerID="a7b9371d327ae023357474ac1123aa0777cc2f0b2fb5899c1092645a24d85f13" Nov 25 09:42:56 crc kubenswrapper[4734]: I1125 09:42:56.253382 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" path="/var/lib/kubelet/pods/f176610e-ee47-4057-bb2b-c01b0867e6f3/volumes" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.024477 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.713326 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-qtwbc"] Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.713949 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="registry-server" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.713969 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="registry-server" Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.713989 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="extract-content" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.713997 4734 
state_mem.go:107] "Deleted CPUSet assignment" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="extract-content" Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.714012 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="registry-server" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.714021 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="registry-server" Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.714034 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="extract-utilities" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.714041 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="extract-utilities" Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.714054 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="extract-content" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.714061 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="extract-content" Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.714069 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="extract-utilities" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.714076 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="extract-utilities" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.714222 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ad4be77-3472-41a6-9696-29f24eb248be" containerName="registry-server" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.714242 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f176610e-ee47-4057-bb2b-c01b0867e6f3" containerName="registry-server" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.716757 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.717273 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn"] Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.718265 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.719757 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.719957 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.720245 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.720439 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-b7h8z" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.738152 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn"] Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.792781 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-j6bv7"] Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.794500 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-j6bv7" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.796911 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.797763 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.798050 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jjgzz" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.798252 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.812618 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-vcb2q"] Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.813675 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.815800 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.824598 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-vcb2q"] Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.843649 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-sockets\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.843716 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spwt6\" (UniqueName: \"kubernetes.io/projected/415eb831-abdf-4454-b8f0-f560fb1d0639-kube-api-access-spwt6\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.843746 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics-certs\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.843795 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-startup\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.843910 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-conf\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.843975 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/456e466e-374e-4332-8ce1-0f84cdfdd462-cert\") pod \"frr-k8s-webhook-server-6998585d5-5c6gn\" (UID: \"456e466e-374e-4332-8ce1-0f84cdfdd462\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.844019 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.844152 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-reloader\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.844184 4734 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gn2n\" (UniqueName: \"kubernetes.io/projected/456e466e-374e-4332-8ce1-0f84cdfdd462-kube-api-access-4gn2n\") pod \"frr-k8s-webhook-server-6998585d5-5c6gn\" (UID: \"456e466e-374e-4332-8ce1-0f84cdfdd462\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.945917 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1a832207-ad0f-4751-96be-a318d59d3c00-metallb-excludel2\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946269 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b78lb\" (UniqueName: \"kubernetes.io/projected/1a832207-ad0f-4751-96be-a318d59d3c00-kube-api-access-b78lb\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946398 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8267\" (UniqueName: \"kubernetes.io/projected/5776ef6f-c2b4-4d10-9975-c047a8290421-kube-api-access-z8267\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946519 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946650 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-reloader\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946751 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gn2n\" (UniqueName: \"kubernetes.io/projected/456e466e-374e-4332-8ce1-0f84cdfdd462-kube-api-access-4gn2n\") pod \"frr-k8s-webhook-server-6998585d5-5c6gn\" (UID: \"456e466e-374e-4332-8ce1-0f84cdfdd462\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946843 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5776ef6f-c2b4-4d10-9975-c047a8290421-cert\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.946960 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-sockets\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947100 
4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spwt6\" (UniqueName: \"kubernetes.io/projected/415eb831-abdf-4454-b8f0-f560fb1d0639-kube-api-access-spwt6\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947214 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics-certs\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947332 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-startup\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947445 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-metrics-certs\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947557 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-conf\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947673 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/456e466e-374e-4332-8ce1-0f84cdfdd462-cert\") pod \"frr-k8s-webhook-server-6998585d5-5c6gn\" (UID: \"456e466e-374e-4332-8ce1-0f84cdfdd462\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947797 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947908 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5776ef6f-c2b4-4d10-9975-c047a8290421-metrics-certs\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947128 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-reloader\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947828 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-conf\") pod \"frr-k8s-qtwbc\" (UID: 
\"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.947268 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-sockets\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.947341 4734 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 25 09:43:13 crc kubenswrapper[4734]: E1125 09:43:13.948462 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics-certs podName:415eb831-abdf-4454-b8f0-f560fb1d0639 nodeName:}" failed. No retries permitted until 2025-11-25 09:43:14.448445149 +0000 UTC m=+917.258907143 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics-certs") pod "frr-k8s-qtwbc" (UID: "415eb831-abdf-4454-b8f0-f560fb1d0639") : secret "frr-k8s-certs-secret" not found Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.948237 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/415eb831-abdf-4454-b8f0-f560fb1d0639-frr-startup\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.948033 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.955954 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/456e466e-374e-4332-8ce1-0f84cdfdd462-cert\") pod \"frr-k8s-webhook-server-6998585d5-5c6gn\" (UID: \"456e466e-374e-4332-8ce1-0f84cdfdd462\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.967948 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gn2n\" (UniqueName: \"kubernetes.io/projected/456e466e-374e-4332-8ce1-0f84cdfdd462-kube-api-access-4gn2n\") pod \"frr-k8s-webhook-server-6998585d5-5c6gn\" (UID: \"456e466e-374e-4332-8ce1-0f84cdfdd462\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:13 crc kubenswrapper[4734]: I1125 09:43:13.976792 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spwt6\" (UniqueName: \"kubernetes.io/projected/415eb831-abdf-4454-b8f0-f560fb1d0639-kube-api-access-spwt6\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049615 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-metrics-certs\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049689 4734 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5776ef6f-c2b4-4d10-9975-c047a8290421-metrics-certs\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049716 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1a832207-ad0f-4751-96be-a318d59d3c00-metallb-excludel2\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049738 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b78lb\" (UniqueName: \"kubernetes.io/projected/1a832207-ad0f-4751-96be-a318d59d3c00-kube-api-access-b78lb\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049767 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8267\" (UniqueName: \"kubernetes.io/projected/5776ef6f-c2b4-4d10-9975-c047a8290421-kube-api-access-z8267\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049789 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.049827 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5776ef6f-c2b4-4d10-9975-c047a8290421-cert\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: E1125 09:43:14.050016 4734 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 25 09:43:14 crc kubenswrapper[4734]: E1125 09:43:14.050077 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-metrics-certs podName:1a832207-ad0f-4751-96be-a318d59d3c00 nodeName:}" failed. No retries permitted until 2025-11-25 09:43:14.550060419 +0000 UTC m=+917.360522413 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-metrics-certs") pod "speaker-j6bv7" (UID: "1a832207-ad0f-4751-96be-a318d59d3c00") : secret "speaker-certs-secret" not found Nov 25 09:43:14 crc kubenswrapper[4734]: E1125 09:43:14.051223 4734 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 09:43:14 crc kubenswrapper[4734]: E1125 09:43:14.051296 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist podName:1a832207-ad0f-4751-96be-a318d59d3c00 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:43:14.551275214 +0000 UTC m=+917.361737268 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist") pod "speaker-j6bv7" (UID: "1a832207-ad0f-4751-96be-a318d59d3c00") : secret "metallb-memberlist" not found Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.051712 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1a832207-ad0f-4751-96be-a318d59d3c00-metallb-excludel2\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.053169 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.053688 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5776ef6f-c2b4-4d10-9975-c047a8290421-metrics-certs\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.059395 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.063908 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5776ef6f-c2b4-4d10-9975-c047a8290421-cert\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.067894 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b78lb\" (UniqueName: \"kubernetes.io/projected/1a832207-ad0f-4751-96be-a318d59d3c00-kube-api-access-b78lb\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.077235 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8267\" (UniqueName: \"kubernetes.io/projected/5776ef6f-c2b4-4d10-9975-c047a8290421-kube-api-access-z8267\") pod \"controller-6c7b4b5f48-vcb2q\" (UID: \"5776ef6f-c2b4-4d10-9975-c047a8290421\") " pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.130943 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.276426 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn"] Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.375423 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-vcb2q"] Nov 25 09:43:14 crc kubenswrapper[4734]: W1125 09:43:14.381136 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5776ef6f_c2b4_4d10_9975_c047a8290421.slice/crio-ba0b05ff4953a9a8d9dcb9c87bb38567858982b3d6539a3cf5d1c9d4d43aff1a WatchSource:0}: Error finding container ba0b05ff4953a9a8d9dcb9c87bb38567858982b3d6539a3cf5d1c9d4d43aff1a: Status 404 returned error can't find the container with id ba0b05ff4953a9a8d9dcb9c87bb38567858982b3d6539a3cf5d1c9d4d43aff1a Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.456629 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics-certs\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.461888 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/415eb831-abdf-4454-b8f0-f560fb1d0639-metrics-certs\") pod \"frr-k8s-qtwbc\" (UID: \"415eb831-abdf-4454-b8f0-f560fb1d0639\") " pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.558341 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: E1125 09:43:14.558523 4734 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.558580 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-metrics-certs\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: E1125 09:43:14.558599 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist podName:1a832207-ad0f-4751-96be-a318d59d3c00 nodeName:}" failed. No retries permitted until 2025-11-25 09:43:15.558581109 +0000 UTC m=+918.369043103 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist") pod "speaker-j6bv7" (UID: "1a832207-ad0f-4751-96be-a318d59d3c00") : secret "metallb-memberlist" not found Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.563927 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-metrics-certs\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.654120 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.776576 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" event={"ID":"456e466e-374e-4332-8ce1-0f84cdfdd462","Type":"ContainerStarted","Data":"a0da6633099259bab7c0e94c4ed8b73c99e1c762a3ec7c13615aa3917f59a05e"} Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.778451 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vcb2q" event={"ID":"5776ef6f-c2b4-4d10-9975-c047a8290421","Type":"ContainerStarted","Data":"8c051b9d4b590225f1911ecf51dda7dcb06b8087a15bb4f8317de4d6ed0b5ad5"} Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.778476 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vcb2q" event={"ID":"5776ef6f-c2b4-4d10-9975-c047a8290421","Type":"ContainerStarted","Data":"ba0b05ff4953a9a8d9dcb9c87bb38567858982b3d6539a3cf5d1c9d4d43aff1a"} Nov 25 09:43:14 crc kubenswrapper[4734]: I1125 09:43:14.779403 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"cbba3c3d7252b3c89486f9cc481d912a2bbbe10ca13c7acf3b87c73334217335"} Nov 25 09:43:15 crc kubenswrapper[4734]: I1125 09:43:15.572729 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:15 crc kubenswrapper[4734]: I1125 09:43:15.576706 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1a832207-ad0f-4751-96be-a318d59d3c00-memberlist\") pod \"speaker-j6bv7\" (UID: \"1a832207-ad0f-4751-96be-a318d59d3c00\") " pod="metallb-system/speaker-j6bv7" Nov 25 09:43:15 crc kubenswrapper[4734]: I1125 09:43:15.611801 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-j6bv7" Nov 25 09:43:15 crc kubenswrapper[4734]: W1125 09:43:15.635767 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a832207_ad0f_4751_96be_a318d59d3c00.slice/crio-14f4d0c9063ff059c08cac802523170db49fb43ec6579490cf20d8be28062315 WatchSource:0}: Error finding container 14f4d0c9063ff059c08cac802523170db49fb43ec6579490cf20d8be28062315: Status 404 returned error can't find the container with id 14f4d0c9063ff059c08cac802523170db49fb43ec6579490cf20d8be28062315 Nov 25 09:43:15 crc kubenswrapper[4734]: I1125 09:43:15.831317 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-j6bv7" event={"ID":"1a832207-ad0f-4751-96be-a318d59d3c00","Type":"ContainerStarted","Data":"14f4d0c9063ff059c08cac802523170db49fb43ec6579490cf20d8be28062315"} Nov 25 09:43:16 crc kubenswrapper[4734]: I1125 09:43:16.850172 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-j6bv7" event={"ID":"1a832207-ad0f-4751-96be-a318d59d3c00","Type":"ContainerStarted","Data":"63bf2efe2dc21cbd4ed7ea03947cbbca1db350163a80172730b609a95ed135c1"} Nov 25 09:43:18 crc kubenswrapper[4734]: I1125 09:43:18.867707 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-j6bv7" event={"ID":"1a832207-ad0f-4751-96be-a318d59d3c00","Type":"ContainerStarted","Data":"5714fa185691a4d0aad6edcb739a2e533f7bbefff0e5730682d564485ca68e0d"} Nov 25 09:43:18 crc kubenswrapper[4734]: I1125 09:43:18.867971 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-j6bv7" Nov 25 09:43:18 crc kubenswrapper[4734]: I1125 09:43:18.869712 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vcb2q" event={"ID":"5776ef6f-c2b4-4d10-9975-c047a8290421","Type":"ContainerStarted","Data":"c3d0894f86d3ddb23e4687bf751a60152cbfa9b4973f0b47022cb2500c5215fc"} Nov 25 09:43:18 crc kubenswrapper[4734]: I1125 09:43:18.869834 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:18 crc kubenswrapper[4734]: I1125 09:43:18.886746 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-j6bv7" podStartSLOduration=3.51466418 podStartE2EDuration="5.886726556s" podCreationTimestamp="2025-11-25 09:43:13 +0000 UTC" firstStartedPulling="2025-11-25 09:43:16.015349189 +0000 UTC m=+918.825811183" lastFinishedPulling="2025-11-25 09:43:18.387411565 +0000 UTC m=+921.197873559" observedRunningTime="2025-11-25 09:43:18.882297707 +0000 UTC m=+921.692759711" watchObservedRunningTime="2025-11-25 09:43:18.886726556 +0000 UTC m=+921.697188540" Nov 25 09:43:18 crc kubenswrapper[4734]: I1125 09:43:18.906617 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-vcb2q" podStartSLOduration=2.021691542 podStartE2EDuration="5.906600157s" podCreationTimestamp="2025-11-25 09:43:13 +0000 UTC" firstStartedPulling="2025-11-25 09:43:14.499040219 +0000 UTC m=+917.309502213" lastFinishedPulling="2025-11-25 09:43:18.383948834 +0000 UTC m=+921.194410828" observedRunningTime="2025-11-25 09:43:18.897895303 +0000 UTC m=+921.708357297" watchObservedRunningTime="2025-11-25 09:43:18.906600157 +0000 UTC m=+921.717062151" Nov 25 09:43:21 crc kubenswrapper[4734]: I1125 09:43:21.888903 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" event={"ID":"456e466e-374e-4332-8ce1-0f84cdfdd462","Type":"ContainerStarted","Data":"2fcafb4f042ff888c1b1b93adcb0786229dc4619461592d00a80aa3ebd47e9dc"} Nov 25 09:43:21 crc kubenswrapper[4734]: I1125 09:43:21.889513 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:21 crc kubenswrapper[4734]: I1125 09:43:21.891634 4734 generic.go:334] "Generic (PLEG): container finished" podID="415eb831-abdf-4454-b8f0-f560fb1d0639" containerID="b043038131ca1d82f276e22c51af16a91c5b28f7f40ffbf41231f6e5082af5f3" exitCode=0 Nov 25 09:43:21 crc kubenswrapper[4734]: I1125 09:43:21.891897 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerDied","Data":"b043038131ca1d82f276e22c51af16a91c5b28f7f40ffbf41231f6e5082af5f3"} Nov 25 09:43:21 crc kubenswrapper[4734]: I1125 09:43:21.908756 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" podStartSLOduration=1.603981045 podStartE2EDuration="8.908694924s" podCreationTimestamp="2025-11-25 09:43:13 +0000 UTC" firstStartedPulling="2025-11-25 09:43:14.285677844 +0000 UTC m=+917.096139838" lastFinishedPulling="2025-11-25 09:43:21.590391723 +0000 UTC m=+924.400853717" observedRunningTime="2025-11-25 09:43:21.903974857 +0000 UTC m=+924.714436851" watchObservedRunningTime="2025-11-25 09:43:21.908694924 +0000 UTC m=+924.719156918" Nov 25 09:43:22 crc kubenswrapper[4734]: I1125 09:43:22.899922 4734 generic.go:334] "Generic (PLEG): container finished" podID="415eb831-abdf-4454-b8f0-f560fb1d0639" containerID="ecd850b63691ed7ba402f7a3ece75b285b9287740161ad843890dfc2cbc02567" exitCode=0 Nov 25 09:43:22 crc kubenswrapper[4734]: I1125 09:43:22.900020 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerDied","Data":"ecd850b63691ed7ba402f7a3ece75b285b9287740161ad843890dfc2cbc02567"} Nov 25 09:43:23 crc kubenswrapper[4734]: I1125 09:43:23.907142 4734 generic.go:334] "Generic (PLEG): container finished" podID="415eb831-abdf-4454-b8f0-f560fb1d0639" containerID="a4fda80aa02e41ebc427fd461740c14a742792db83ad05519af7661ee58018ba" exitCode=0 Nov 25 09:43:23 crc kubenswrapper[4734]: I1125 09:43:23.907186 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerDied","Data":"a4fda80aa02e41ebc427fd461740c14a742792db83ad05519af7661ee58018ba"} Nov 25 09:43:24 crc kubenswrapper[4734]: I1125 09:43:24.134448 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-vcb2q" Nov 25 09:43:24 crc kubenswrapper[4734]: I1125 09:43:24.919831 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"d61c516bdd6e2cb4ab01b906cbb1444f67504686fc120fc2549c965d1e3413d3"} Nov 25 09:43:24 crc kubenswrapper[4734]: I1125 09:43:24.920072 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"f22a5e54464582443e57840d6c0d5c1b25de5420a054c511ab9d7086cc5af260"} Nov 25 09:43:24 crc kubenswrapper[4734]: 
I1125 09:43:24.920098 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"54b255c79390cdd0623cc6502576ec5d89abb92ef75e94f3cd013d9b7ce2071d"} Nov 25 09:43:25 crc kubenswrapper[4734]: I1125 09:43:25.934044 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"5b23021626001915c857019158556eb2f2d92756398a6644c14347dc36a51750"} Nov 25 09:43:25 crc kubenswrapper[4734]: I1125 09:43:25.934131 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"0b50ee96b5111c139816dfa0c8659c05d85d1f0c7193b73e8406c5b61318f93b"} Nov 25 09:43:25 crc kubenswrapper[4734]: I1125 09:43:25.934147 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qtwbc" event={"ID":"415eb831-abdf-4454-b8f0-f560fb1d0639","Type":"ContainerStarted","Data":"2523fc8c5bdf5f77bb38e83e467bc87cb12d0625c115307d97ce2b38071dc567"} Nov 25 09:43:25 crc kubenswrapper[4734]: I1125 09:43:25.934258 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:25 crc kubenswrapper[4734]: I1125 09:43:25.961245 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-qtwbc" podStartSLOduration=6.122021242 podStartE2EDuration="12.961218968s" podCreationTimestamp="2025-11-25 09:43:13 +0000 UTC" firstStartedPulling="2025-11-25 09:43:14.75368969 +0000 UTC m=+917.564151684" lastFinishedPulling="2025-11-25 09:43:21.592887416 +0000 UTC m=+924.403349410" observedRunningTime="2025-11-25 09:43:25.957337785 +0000 UTC m=+928.767799809" watchObservedRunningTime="2025-11-25 09:43:25.961218968 +0000 UTC m=+928.771680962" Nov 25 09:43:29 crc kubenswrapper[4734]: I1125 09:43:29.655296 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:29 crc kubenswrapper[4734]: I1125 09:43:29.695813 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:34 crc kubenswrapper[4734]: I1125 09:43:34.062897 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-5c6gn" Nov 25 09:43:34 crc kubenswrapper[4734]: I1125 09:43:34.656142 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-qtwbc" Nov 25 09:43:35 crc kubenswrapper[4734]: I1125 09:43:35.614505 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-j6bv7" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.026675 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-index-ffbkf"] Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.027340 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.030242 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-txb4w" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.038184 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-ffbkf"] Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.159780 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jhdv\" (UniqueName: \"kubernetes.io/projected/264193c8-d7e4-43f9-ba29-55707b591bd4-kube-api-access-2jhdv\") pod \"infra-operator-index-ffbkf\" (UID: \"264193c8-d7e4-43f9-ba29-55707b591bd4\") " pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.261772 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jhdv\" (UniqueName: \"kubernetes.io/projected/264193c8-d7e4-43f9-ba29-55707b591bd4-kube-api-access-2jhdv\") pod \"infra-operator-index-ffbkf\" (UID: \"264193c8-d7e4-43f9-ba29-55707b591bd4\") " pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.285210 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jhdv\" (UniqueName: \"kubernetes.io/projected/264193c8-d7e4-43f9-ba29-55707b591bd4-kube-api-access-2jhdv\") pod \"infra-operator-index-ffbkf\" (UID: \"264193c8-d7e4-43f9-ba29-55707b591bd4\") " pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.344419 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.619758 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-index-ffbkf"] Nov 25 09:43:36 crc kubenswrapper[4734]: I1125 09:43:36.993806 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-ffbkf" event={"ID":"264193c8-d7e4-43f9-ba29-55707b591bd4","Type":"ContainerStarted","Data":"60ec8c1f30b11cb8a3aaa18cf2fadcdc139fd89b9d36ec7df9c69f23899eb437"} Nov 25 09:43:38 crc kubenswrapper[4734]: I1125 09:43:38.007642 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-index-ffbkf" event={"ID":"264193c8-d7e4-43f9-ba29-55707b591bd4","Type":"ContainerStarted","Data":"270b275ef11e7b9394dea31767cd079a75f0b66c849357c1d70126530c5614ef"} Nov 25 09:43:38 crc kubenswrapper[4734]: I1125 09:43:38.022761 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-index-ffbkf" podStartSLOduration=1.205718535 podStartE2EDuration="2.022745901s" podCreationTimestamp="2025-11-25 09:43:36 +0000 UTC" firstStartedPulling="2025-11-25 09:43:36.638552532 +0000 UTC m=+939.449014526" lastFinishedPulling="2025-11-25 09:43:37.455579898 +0000 UTC m=+940.266041892" observedRunningTime="2025-11-25 09:43:38.020804624 +0000 UTC m=+940.831266618" watchObservedRunningTime="2025-11-25 09:43:38.022745901 +0000 UTC m=+940.833207895" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.427847 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dmjdt"] Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.429701 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.453383 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dmjdt"] Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.554935 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-utilities\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.554978 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4nkj\" (UniqueName: \"kubernetes.io/projected/630d60ba-b306-4278-b6bd-e4d1d46aaa18-kube-api-access-d4nkj\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.555013 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-catalog-content\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.656507 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-utilities\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.656582 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4nkj\" (UniqueName: \"kubernetes.io/projected/630d60ba-b306-4278-b6bd-e4d1d46aaa18-kube-api-access-d4nkj\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.656636 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-catalog-content\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.657277 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-catalog-content\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.657751 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-utilities\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.683547 4734 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-d4nkj\" (UniqueName: \"kubernetes.io/projected/630d60ba-b306-4278-b6bd-e4d1d46aaa18-kube-api-access-d4nkj\") pod \"certified-operators-dmjdt\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") " pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:43 crc kubenswrapper[4734]: I1125 09:43:43.747829 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:44 crc kubenswrapper[4734]: I1125 09:43:44.275281 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dmjdt"] Nov 25 09:43:45 crc kubenswrapper[4734]: I1125 09:43:45.052015 4734 generic.go:334] "Generic (PLEG): container finished" podID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerID="f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855" exitCode=0 Nov 25 09:43:45 crc kubenswrapper[4734]: I1125 09:43:45.052118 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmjdt" event={"ID":"630d60ba-b306-4278-b6bd-e4d1d46aaa18","Type":"ContainerDied","Data":"f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855"} Nov 25 09:43:45 crc kubenswrapper[4734]: I1125 09:43:45.052452 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmjdt" event={"ID":"630d60ba-b306-4278-b6bd-e4d1d46aaa18","Type":"ContainerStarted","Data":"06bb1bd925d03e6a01e18cfa8b05d0b3a88a1627f0f1d1b93fe984e43ef98b9d"} Nov 25 09:43:46 crc kubenswrapper[4734]: I1125 09:43:46.059689 4734 generic.go:334] "Generic (PLEG): container finished" podID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerID="c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830" exitCode=0 Nov 25 09:43:46 crc kubenswrapper[4734]: I1125 09:43:46.059729 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmjdt" event={"ID":"630d60ba-b306-4278-b6bd-e4d1d46aaa18","Type":"ContainerDied","Data":"c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830"} Nov 25 09:43:46 crc kubenswrapper[4734]: I1125 09:43:46.345472 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:46 crc kubenswrapper[4734]: I1125 09:43:46.345538 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:46 crc kubenswrapper[4734]: I1125 09:43:46.374219 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:47 crc kubenswrapper[4734]: I1125 09:43:47.067604 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmjdt" event={"ID":"630d60ba-b306-4278-b6bd-e4d1d46aaa18","Type":"ContainerStarted","Data":"3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20"} Nov 25 09:43:47 crc kubenswrapper[4734]: I1125 09:43:47.085569 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dmjdt" podStartSLOduration=2.640062136 podStartE2EDuration="4.085500395s" podCreationTimestamp="2025-11-25 09:43:43 +0000 UTC" firstStartedPulling="2025-11-25 09:43:45.054501904 +0000 UTC m=+947.864963898" lastFinishedPulling="2025-11-25 09:43:46.499940163 +0000 UTC m=+949.310402157" observedRunningTime="2025-11-25 
09:43:47.084168386 +0000 UTC m=+949.894630400" watchObservedRunningTime="2025-11-25 09:43:47.085500395 +0000 UTC m=+949.895962389" Nov 25 09:43:47 crc kubenswrapper[4734]: I1125 09:43:47.091075 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-index-ffbkf" Nov 25 09:43:50 crc kubenswrapper[4734]: I1125 09:43:50.695769 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:43:50 crc kubenswrapper[4734]: I1125 09:43:50.696058 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:43:53 crc kubenswrapper[4734]: I1125 09:43:53.747956 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:53 crc kubenswrapper[4734]: I1125 09:43:53.748347 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:53 crc kubenswrapper[4734]: I1125 09:43:53.787381 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.046492 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"] Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.047906 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.057456 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"] Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.058771 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9j88k" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.149241 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dmjdt" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.194135 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-bundle\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.194202 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-util\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.194291 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhdx2\" (UniqueName: \"kubernetes.io/projected/8d8e91f8-218d-4871-91e1-e807206738f4-kube-api-access-zhdx2\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.295310 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhdx2\" (UniqueName: \"kubernetes.io/projected/8d8e91f8-218d-4871-91e1-e807206738f4-kube-api-access-zhdx2\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.295371 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-bundle\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.295392 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-util\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:54 crc 
Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.296047 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-bundle\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"
Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.315905 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhdx2\" (UniqueName: \"kubernetes.io/projected/8d8e91f8-218d-4871-91e1-e807206738f4-kube-api-access-zhdx2\") pod \"5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") " pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"
Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.368769 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"
Nov 25 09:43:54 crc kubenswrapper[4734]: I1125 09:43:54.786313 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"]
Nov 25 09:43:55 crc kubenswrapper[4734]: I1125 09:43:55.117918 4734 generic.go:334] "Generic (PLEG): container finished" podID="8d8e91f8-218d-4871-91e1-e807206738f4" containerID="5e1615cef43d67fcda380dce785b7d33d84483c9469902cdc5a42f1cf4d5512f" exitCode=0
Nov 25 09:43:55 crc kubenswrapper[4734]: I1125 09:43:55.117977 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" event={"ID":"8d8e91f8-218d-4871-91e1-e807206738f4","Type":"ContainerDied","Data":"5e1615cef43d67fcda380dce785b7d33d84483c9469902cdc5a42f1cf4d5512f"}
Nov 25 09:43:55 crc kubenswrapper[4734]: I1125 09:43:55.118333 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" event={"ID":"8d8e91f8-218d-4871-91e1-e807206738f4","Type":"ContainerStarted","Data":"8443ed377c22876f92eec2ba65d7f2570d279775cb04c79146ee561033feec3c"}
Nov 25 09:43:56 crc kubenswrapper[4734]: I1125 09:43:56.135038 4734 generic.go:334] "Generic (PLEG): container finished" podID="8d8e91f8-218d-4871-91e1-e807206738f4" containerID="bffb46fbda6fc76099651b0fadad3589e2612f814331a046a6bcca478615dfd2" exitCode=0
Nov 25 09:43:56 crc kubenswrapper[4734]: I1125 09:43:56.135163 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" event={"ID":"8d8e91f8-218d-4871-91e1-e807206738f4","Type":"ContainerDied","Data":"bffb46fbda6fc76099651b0fadad3589e2612f814331a046a6bcca478615dfd2"}
Nov 25 09:43:57 crc kubenswrapper[4734]: I1125 09:43:57.144280 4734 generic.go:334] "Generic (PLEG): container finished" podID="8d8e91f8-218d-4871-91e1-e807206738f4" containerID="f716a816f86ded28f8e9d380e2ad952a2b16a6c84248dc491b7b9dee918b9c64" exitCode=0
Nov 25 09:43:57 crc kubenswrapper[4734]: I1125 09:43:57.145156 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" event={"ID":"8d8e91f8-218d-4871-91e1-e807206738f4","Type":"ContainerDied","Data":"f716a816f86ded28f8e9d380e2ad952a2b16a6c84248dc491b7b9dee918b9c64"}
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.204743 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dmjdt"]
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.205365 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dmjdt" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="registry-server" containerID="cri-o://3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20" gracePeriod=2
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.390556 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.549643 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-bundle\") pod \"8d8e91f8-218d-4871-91e1-e807206738f4\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") "
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.549712 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-util\") pod \"8d8e91f8-218d-4871-91e1-e807206738f4\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") "
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.549770 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhdx2\" (UniqueName: \"kubernetes.io/projected/8d8e91f8-218d-4871-91e1-e807206738f4-kube-api-access-zhdx2\") pod \"8d8e91f8-218d-4871-91e1-e807206738f4\" (UID: \"8d8e91f8-218d-4871-91e1-e807206738f4\") "
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.550840 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-bundle" (OuterVolumeSpecName: "bundle") pod "8d8e91f8-218d-4871-91e1-e807206738f4" (UID: "8d8e91f8-218d-4871-91e1-e807206738f4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.556331 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d8e91f8-218d-4871-91e1-e807206738f4-kube-api-access-zhdx2" (OuterVolumeSpecName: "kube-api-access-zhdx2") pod "8d8e91f8-218d-4871-91e1-e807206738f4" (UID: "8d8e91f8-218d-4871-91e1-e807206738f4"). InnerVolumeSpecName "kube-api-access-zhdx2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.569878 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-util" (OuterVolumeSpecName: "util") pod "8d8e91f8-218d-4871-91e1-e807206738f4" (UID: "8d8e91f8-218d-4871-91e1-e807206738f4"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.608042 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmjdt"
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.651369 4734 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.651414 4734 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8d8e91f8-218d-4871-91e1-e807206738f4-util\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.651429 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhdx2\" (UniqueName: \"kubernetes.io/projected/8d8e91f8-218d-4871-91e1-e807206738f4-kube-api-access-zhdx2\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.752609 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-utilities\") pod \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") "
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.752681 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-catalog-content\") pod \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") "
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.752789 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4nkj\" (UniqueName: \"kubernetes.io/projected/630d60ba-b306-4278-b6bd-e4d1d46aaa18-kube-api-access-d4nkj\") pod \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\" (UID: \"630d60ba-b306-4278-b6bd-e4d1d46aaa18\") "
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.753606 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-utilities" (OuterVolumeSpecName: "utilities") pod "630d60ba-b306-4278-b6bd-e4d1d46aaa18" (UID: "630d60ba-b306-4278-b6bd-e4d1d46aaa18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.757266 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/630d60ba-b306-4278-b6bd-e4d1d46aaa18-kube-api-access-d4nkj" (OuterVolumeSpecName: "kube-api-access-d4nkj") pod "630d60ba-b306-4278-b6bd-e4d1d46aaa18" (UID: "630d60ba-b306-4278-b6bd-e4d1d46aaa18"). InnerVolumeSpecName "kube-api-access-d4nkj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.801411 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "630d60ba-b306-4278-b6bd-e4d1d46aaa18" (UID: "630d60ba-b306-4278-b6bd-e4d1d46aaa18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.854537 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.854574 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/630d60ba-b306-4278-b6bd-e4d1d46aaa18-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:58 crc kubenswrapper[4734]: I1125 09:43:58.854591 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4nkj\" (UniqueName: \"kubernetes.io/projected/630d60ba-b306-4278-b6bd-e4d1d46aaa18-kube-api-access-d4nkj\") on node \"crc\" DevicePath \"\""
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.155926 4734 generic.go:334] "Generic (PLEG): container finished" podID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerID="3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20" exitCode=0
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.155988 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmjdt" event={"ID":"630d60ba-b306-4278-b6bd-e4d1d46aaa18","Type":"ContainerDied","Data":"3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20"}
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.156008 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmjdt"
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.156026 4734 scope.go:117] "RemoveContainer" containerID="3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20"
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.156014 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmjdt" event={"ID":"630d60ba-b306-4278-b6bd-e4d1d46aaa18","Type":"ContainerDied","Data":"06bb1bd925d03e6a01e18cfa8b05d0b3a88a1627f0f1d1b93fe984e43ef98b9d"}
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.159539 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" event={"ID":"8d8e91f8-218d-4871-91e1-e807206738f4","Type":"ContainerDied","Data":"8443ed377c22876f92eec2ba65d7f2570d279775cb04c79146ee561033feec3c"}
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.159572 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8443ed377c22876f92eec2ba65d7f2570d279775cb04c79146ee561033feec3c"
Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.159634 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l"
Need to start a new one" pod="openstack-operators/5d473c3169f40b179d14921c90af2c8546b7b757fe551b7dba7d903f5dqqb8l" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.175359 4734 scope.go:117] "RemoveContainer" containerID="c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.189152 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dmjdt"] Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.192816 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dmjdt"] Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.194399 4734 scope.go:117] "RemoveContainer" containerID="f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.224119 4734 scope.go:117] "RemoveContainer" containerID="3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20" Nov 25 09:43:59 crc kubenswrapper[4734]: E1125 09:43:59.224429 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20\": container with ID starting with 3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20 not found: ID does not exist" containerID="3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.224474 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20"} err="failed to get container status \"3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20\": rpc error: code = NotFound desc = could not find container \"3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20\": container with ID starting with 3b62bfc45ba2bd5932dd471e7efe3aa1a8071440dc545d5abc645bb785afce20 not found: ID does not exist" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.224498 4734 scope.go:117] "RemoveContainer" containerID="c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830" Nov 25 09:43:59 crc kubenswrapper[4734]: E1125 09:43:59.224801 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830\": container with ID starting with c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830 not found: ID does not exist" containerID="c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.224827 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830"} err="failed to get container status \"c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830\": rpc error: code = NotFound desc = could not find container \"c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830\": container with ID starting with c30618ef4f41cc15107a33a96bfe744d0724977829ba18187aa0cb0f9b081830 not found: ID does not exist" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.224850 4734 scope.go:117] "RemoveContainer" containerID="f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855" Nov 25 09:43:59 crc kubenswrapper[4734]: E1125 09:43:59.225078 4734 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855\": container with ID starting with f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855 not found: ID does not exist" containerID="f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855" Nov 25 09:43:59 crc kubenswrapper[4734]: I1125 09:43:59.225150 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855"} err="failed to get container status \"f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855\": rpc error: code = NotFound desc = could not find container \"f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855\": container with ID starting with f47da1bc04372647e5f59235a6c8cc405acac167f3aab6d3f9918288c1f8f855 not found: ID does not exist" Nov 25 09:44:00 crc kubenswrapper[4734]: I1125 09:44:00.256453 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" path="/var/lib/kubelet/pods/630d60ba-b306-4278-b6bd-e4d1d46aaa18/volumes" Nov 25 09:44:05 crc kubenswrapper[4734]: I1125 09:44:05.223033 4734 generic.go:334] "Generic (PLEG): container finished" podID="ae285841-6883-40fd-aa4c-dc13e1afdf95" containerID="cb4bfa871f9f584476931d0e330c8b0d41e81993189fdcea0d71d5ed616d55dd" exitCode=1 Nov 25 09:44:05 crc kubenswrapper[4734]: I1125 09:44:05.223247 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerDied","Data":"cb4bfa871f9f584476931d0e330c8b0d41e81993189fdcea0d71d5ed616d55dd"} Nov 25 09:44:05 crc kubenswrapper[4734]: I1125 09:44:05.224296 4734 scope.go:117] "RemoveContainer" containerID="cb4bfa871f9f584476931d0e330c8b0d41e81993189fdcea0d71d5ed616d55dd" Nov 25 09:44:06 crc kubenswrapper[4734]: I1125 09:44:06.232514 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerStarted","Data":"cf51382140cc95b0204a58895b7628e8d351e46c4718b09c604ff064c71bcd79"} Nov 25 09:44:06 crc kubenswrapper[4734]: I1125 09:44:06.232990 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.656991 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd"] Nov 25 09:44:07 crc kubenswrapper[4734]: E1125 09:44:07.657459 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="util" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657476 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="util" Nov 25 09:44:07 crc kubenswrapper[4734]: E1125 09:44:07.657482 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="pull" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657489 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="pull" Nov 25 09:44:07 crc kubenswrapper[4734]: 
E1125 09:44:07.657501 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="extract-utilities" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657507 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="extract-utilities" Nov 25 09:44:07 crc kubenswrapper[4734]: E1125 09:44:07.657514 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="registry-server" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657521 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="registry-server" Nov 25 09:44:07 crc kubenswrapper[4734]: E1125 09:44:07.657536 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="extract" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657541 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="extract" Nov 25 09:44:07 crc kubenswrapper[4734]: E1125 09:44:07.657551 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="extract-content" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657557 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="extract-content" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657660 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="630d60ba-b306-4278-b6bd-e4d1d46aaa18" containerName="registry-server" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.657669 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d8e91f8-218d-4871-91e1-e807206738f4" containerName="extract" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.658308 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.660743 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.665849 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-vv559" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.675962 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd"] Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.765558 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/842336e7-3fca-4ce9-b030-735f9fa84367-webhook-cert\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.765623 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/842336e7-3fca-4ce9-b030-735f9fa84367-apiservice-cert\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.765691 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8pcv\" (UniqueName: \"kubernetes.io/projected/842336e7-3fca-4ce9-b030-735f9fa84367-kube-api-access-w8pcv\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.867068 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/842336e7-3fca-4ce9-b030-735f9fa84367-webhook-cert\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.867150 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/842336e7-3fca-4ce9-b030-735f9fa84367-apiservice-cert\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.867176 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8pcv\" (UniqueName: \"kubernetes.io/projected/842336e7-3fca-4ce9-b030-735f9fa84367-kube-api-access-w8pcv\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.872549 4734 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/842336e7-3fca-4ce9-b030-735f9fa84367-webhook-cert\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.874046 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/842336e7-3fca-4ce9-b030-735f9fa84367-apiservice-cert\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.884784 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8pcv\" (UniqueName: \"kubernetes.io/projected/842336e7-3fca-4ce9-b030-735f9fa84367-kube-api-access-w8pcv\") pod \"infra-operator-controller-manager-68875f9666-bkfdd\" (UID: \"842336e7-3fca-4ce9-b030-735f9fa84367\") " pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:07 crc kubenswrapper[4734]: I1125 09:44:07.974645 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:08 crc kubenswrapper[4734]: I1125 09:44:08.364652 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd"] Nov 25 09:44:09 crc kubenswrapper[4734]: I1125 09:44:09.253494 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerStarted","Data":"0d00080f64de00e280b189dd2739963ded5a7af76e46bdf39546b8a226d878f5"} Nov 25 09:44:11 crc kubenswrapper[4734]: I1125 09:44:11.266575 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerStarted","Data":"4aebda91f0ccf0a9298c9c33f02a6c93fbeeaa226d2fb90d9f62c649feba01fd"} Nov 25 09:44:15 crc kubenswrapper[4734]: I1125 09:44:15.291876 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerStarted","Data":"7098492451cc8e647a859a5547ae95abca6c73d6138f42874c696c9cd64fb9f1"} Nov 25 09:44:15 crc kubenswrapper[4734]: I1125 09:44:15.292796 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:15 crc kubenswrapper[4734]: I1125 09:44:15.298109 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 09:44:15 crc kubenswrapper[4734]: I1125 09:44:15.311881 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podStartSLOduration=1.6541240259999999 podStartE2EDuration="8.311853279s" podCreationTimestamp="2025-11-25 09:44:07 +0000 UTC" firstStartedPulling="2025-11-25 09:44:08.372194237 +0000 UTC m=+971.182656231" lastFinishedPulling="2025-11-25 09:44:15.02992349 +0000 UTC 
m=+977.840385484" observedRunningTime="2025-11-25 09:44:15.311311683 +0000 UTC m=+978.121773687" watchObservedRunningTime="2025-11-25 09:44:15.311853279 +0000 UTC m=+978.122315283" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.067893 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/memcached-0"] Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.069066 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.071850 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"kube-root-ca.crt" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.071872 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openshift-service-ca.crt" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.071879 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"memcached-config-data" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.072642 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"memcached-memcached-dockercfg-75mlz" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.090050 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/memcached-0"] Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.207608 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71b4c45a-50ec-4be3-9378-76e249f3a7ac-config-data\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.207734 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnnbf\" (UniqueName: \"kubernetes.io/projected/71b4c45a-50ec-4be3-9378-76e249f3a7ac-kube-api-access-jnnbf\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.207782 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/71b4c45a-50ec-4be3-9378-76e249f3a7ac-kolla-config\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.308965 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/71b4c45a-50ec-4be3-9378-76e249f3a7ac-kolla-config\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.309055 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71b4c45a-50ec-4be3-9378-76e249f3a7ac-config-data\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.309155 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnnbf\" (UniqueName: \"kubernetes.io/projected/71b4c45a-50ec-4be3-9378-76e249f3a7ac-kube-api-access-jnnbf\") 
pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.309943 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/71b4c45a-50ec-4be3-9378-76e249f3a7ac-kolla-config\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.310393 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/71b4c45a-50ec-4be3-9378-76e249f3a7ac-config-data\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.334758 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnnbf\" (UniqueName: \"kubernetes.io/projected/71b4c45a-50ec-4be3-9378-76e249f3a7ac-kube-api-access-jnnbf\") pod \"memcached-0\" (UID: \"71b4c45a-50ec-4be3-9378-76e249f3a7ac\") " pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.387169 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:17 crc kubenswrapper[4734]: I1125 09:44:17.624363 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/memcached-0"] Nov 25 09:44:18 crc kubenswrapper[4734]: I1125 09:44:18.310369 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/memcached-0" event={"ID":"71b4c45a-50ec-4be3-9378-76e249f3a7ac","Type":"ContainerStarted","Data":"82d267cbac87513d95d703ef41b5019e2b724caf1eef8b1ac207f7803297999f"} Nov 25 09:44:19 crc kubenswrapper[4734]: I1125 09:44:19.632025 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 09:44:20 crc kubenswrapper[4734]: I1125 09:44:20.696618 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:44:20 crc kubenswrapper[4734]: I1125 09:44:20.696932 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.904219 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/openstack-galera-1"] Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.905680 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.909477 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/openstack-galera-0"] Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.910598 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.911321 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openstack-config-data" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.911638 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"galera-openstack-dockercfg-jjfp6" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.911827 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openstack-scripts" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.918868 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/openstack-galera-2"] Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.920289 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.934889 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstack-galera-1"] Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.957860 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstack-galera-2"] Nov 25 09:44:22 crc kubenswrapper[4734]: I1125 09:44:22.989424 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstack-galera-0"] Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039377 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-config-data-default\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039420 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-operator-scripts\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039444 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9b48\" (UniqueName: \"kubernetes.io/projected/c2c093a1-6bb8-4137-b678-69116438377c-kube-api-access-c9b48\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039463 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-config-data-default\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039484 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-operator-scripts\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039532 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b233a2b9-7c48-4d52-8896-4aa4b814605f-config-data-generated\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039546 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-kolla-config\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039566 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztck4\" (UniqueName: \"kubernetes.io/projected/b233a2b9-7c48-4d52-8896-4aa4b814605f-kube-api-access-ztck4\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039584 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-config-data-default\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039604 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039620 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039638 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039654 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-kolla-config\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039675 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039705 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pbdt\" (UniqueName: \"kubernetes.io/projected/c79a649b-3543-4b6f-8e59-caebefa31001-kube-api-access-9pbdt\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039739 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c79a649b-3543-4b6f-8e59-caebefa31001-config-data-generated\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.039763 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c2c093a1-6bb8-4137-b678-69116438377c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140821 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pbdt\" (UniqueName: \"kubernetes.io/projected/c79a649b-3543-4b6f-8e59-caebefa31001-kube-api-access-9pbdt\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140886 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c79a649b-3543-4b6f-8e59-caebefa31001-config-data-generated\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140916 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c2c093a1-6bb8-4137-b678-69116438377c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140938 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-config-data-default\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2"
Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140954 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-operator-scripts\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2"
Nov
25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140973 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9b48\" (UniqueName: \"kubernetes.io/projected/c2c093a1-6bb8-4137-b678-69116438377c-kube-api-access-c9b48\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.140999 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-config-data-default\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141032 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-operator-scripts\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141056 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-kolla-config\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141115 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b233a2b9-7c48-4d52-8896-4aa4b814605f-config-data-generated\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141134 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-kolla-config\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141154 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztck4\" (UniqueName: \"kubernetes.io/projected/b233a2b9-7c48-4d52-8896-4aa4b814605f-kube-api-access-ztck4\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141175 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-config-data-default\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141198 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141266 4734 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141289 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141307 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-kolla-config\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141334 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141504 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/c79a649b-3543-4b6f-8e59-caebefa31001-config-data-generated\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141816 4734 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") device mount path \"/mnt/openstack/pv09\"" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.142219 4734 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") device mount path \"/mnt/openstack/pv07\"" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.142451 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-config-data-default\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.142629 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-operator-scripts\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.141824 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/c2c093a1-6bb8-4137-b678-69116438377c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.143030 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.143162 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-kolla-config\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.143170 4734 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") device mount path \"/mnt/openstack/pv12\"" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.143318 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c2c093a1-6bb8-4137-b678-69116438377c-config-data-default\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.143841 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-kolla-config\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.144016 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-config-data-default\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.144417 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c79a649b-3543-4b6f-8e59-caebefa31001-operator-scripts\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.146535 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b233a2b9-7c48-4d52-8896-4aa4b814605f-kolla-config\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.146856 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b233a2b9-7c48-4d52-8896-4aa4b814605f-config-data-generated\") pod \"openstack-galera-2\" (UID: 
\"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.162228 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.162750 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.165476 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztck4\" (UniqueName: \"kubernetes.io/projected/b233a2b9-7c48-4d52-8896-4aa4b814605f-kube-api-access-ztck4\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.175156 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9b48\" (UniqueName: \"kubernetes.io/projected/c2c093a1-6bb8-4137-b678-69116438377c-kube-api-access-c9b48\") pod \"openstack-galera-0\" (UID: \"c2c093a1-6bb8-4137-b678-69116438377c\") " pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.175485 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-2\" (UID: \"b233a2b9-7c48-4d52-8896-4aa4b814605f\") " pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.189068 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pbdt\" (UniqueName: \"kubernetes.io/projected/c79a649b-3543-4b6f-8e59-caebefa31001-kube-api-access-9pbdt\") pod \"openstack-galera-1\" (UID: \"c79a649b-3543-4b6f-8e59-caebefa31001\") " pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.232550 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.247559 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:23 crc kubenswrapper[4734]: I1125 09:44:23.255006 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.010826 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5xxjs"] Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.013734 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.016525 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-x9h95" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.028977 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5xxjs"] Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.161629 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcdqw\" (UniqueName: \"kubernetes.io/projected/fd08caf7-b5ce-439d-a8df-9582554dc07a-kube-api-access-mcdqw\") pod \"rabbitmq-cluster-operator-index-5xxjs\" (UID: \"fd08caf7-b5ce-439d-a8df-9582554dc07a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.263064 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcdqw\" (UniqueName: \"kubernetes.io/projected/fd08caf7-b5ce-439d-a8df-9582554dc07a-kube-api-access-mcdqw\") pod \"rabbitmq-cluster-operator-index-5xxjs\" (UID: \"fd08caf7-b5ce-439d-a8df-9582554dc07a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.286037 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcdqw\" (UniqueName: \"kubernetes.io/projected/fd08caf7-b5ce-439d-a8df-9582554dc07a-kube-api-access-mcdqw\") pod \"rabbitmq-cluster-operator-index-5xxjs\" (UID: \"fd08caf7-b5ce-439d-a8df-9582554dc07a\") " pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.398596 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.677831 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstack-galera-2"] Nov 25 09:44:24 crc kubenswrapper[4734]: W1125 09:44:24.684458 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb233a2b9_7c48_4d52_8896_4aa4b814605f.slice/crio-6adf4fb5476f565369a7aaa68c2dd50411c5a82b02b11d79fa2e8a84023ba8cd WatchSource:0}: Error finding container 6adf4fb5476f565369a7aaa68c2dd50411c5a82b02b11d79fa2e8a84023ba8cd: Status 404 returned error can't find the container with id 6adf4fb5476f565369a7aaa68c2dd50411c5a82b02b11d79fa2e8a84023ba8cd Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.721957 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstack-galera-0"] Nov 25 09:44:24 crc kubenswrapper[4734]: W1125 09:44:24.729935 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2c093a1_6bb8_4137_b678_69116438377c.slice/crio-303b014daa07b5cd6238ddd3d0c0807380852d07cb61b6043d86fa845cf374a1 WatchSource:0}: Error finding container 303b014daa07b5cd6238ddd3d0c0807380852d07cb61b6043d86fa845cf374a1: Status 404 returned error can't find the container with id 303b014daa07b5cd6238ddd3d0c0807380852d07cb61b6043d86fa845cf374a1 Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.826144 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstack-galera-1"] Nov 25 09:44:24 crc kubenswrapper[4734]: I1125 09:44:24.963382 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5xxjs"] Nov 25 09:44:25 crc kubenswrapper[4734]: I1125 09:44:25.382378 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/memcached-0" event={"ID":"71b4c45a-50ec-4be3-9378-76e249f3a7ac","Type":"ContainerStarted","Data":"e46404965c3e7edd2963b568941793f7cbd9f7f9386d87c6acf6c8b5ad17844f"} Nov 25 09:44:25 crc kubenswrapper[4734]: I1125 09:44:25.383253 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:25 crc kubenswrapper[4734]: I1125 09:44:25.385696 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-2" event={"ID":"b233a2b9-7c48-4d52-8896-4aa4b814605f","Type":"ContainerStarted","Data":"6adf4fb5476f565369a7aaa68c2dd50411c5a82b02b11d79fa2e8a84023ba8cd"} Nov 25 09:44:25 crc kubenswrapper[4734]: I1125 09:44:25.387491 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" event={"ID":"fd08caf7-b5ce-439d-a8df-9582554dc07a","Type":"ContainerStarted","Data":"ea35b1e2dad33a2df332026322ff44d7e801db000b02b5b8ae43a64a84c84711"} Nov 25 09:44:25 crc kubenswrapper[4734]: I1125 09:44:25.396019 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-0" event={"ID":"c2c093a1-6bb8-4137-b678-69116438377c","Type":"ContainerStarted","Data":"303b014daa07b5cd6238ddd3d0c0807380852d07cb61b6043d86fa845cf374a1"} Nov 25 09:44:25 crc kubenswrapper[4734]: I1125 09:44:25.411813 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-1" 
event={"ID":"c79a649b-3543-4b6f-8e59-caebefa31001","Type":"ContainerStarted","Data":"73f27a87ad57eca1290893da614d2be25466974ef5d406a8bb51090d7004b36f"} Nov 25 09:44:28 crc kubenswrapper[4734]: I1125 09:44:28.796478 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/memcached-0" podStartSLOduration=5.006523651 podStartE2EDuration="11.796462437s" podCreationTimestamp="2025-11-25 09:44:17 +0000 UTC" firstStartedPulling="2025-11-25 09:44:17.63137901 +0000 UTC m=+980.441840994" lastFinishedPulling="2025-11-25 09:44:24.421317786 +0000 UTC m=+987.231779780" observedRunningTime="2025-11-25 09:44:25.415402065 +0000 UTC m=+988.225864059" watchObservedRunningTime="2025-11-25 09:44:28.796462437 +0000 UTC m=+991.606924431" Nov 25 09:44:28 crc kubenswrapper[4734]: I1125 09:44:28.797744 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5xxjs"] Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.404670 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-zpkc8"] Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.405772 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.414128 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-zpkc8"] Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.544489 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7sqs\" (UniqueName: \"kubernetes.io/projected/4a78f462-c9e1-48e7-a4d1-f459bef3ae77-kube-api-access-v7sqs\") pod \"rabbitmq-cluster-operator-index-zpkc8\" (UID: \"4a78f462-c9e1-48e7-a4d1-f459bef3ae77\") " pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.645620 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7sqs\" (UniqueName: \"kubernetes.io/projected/4a78f462-c9e1-48e7-a4d1-f459bef3ae77-kube-api-access-v7sqs\") pod \"rabbitmq-cluster-operator-index-zpkc8\" (UID: \"4a78f462-c9e1-48e7-a4d1-f459bef3ae77\") " pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.671284 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7sqs\" (UniqueName: \"kubernetes.io/projected/4a78f462-c9e1-48e7-a4d1-f459bef3ae77-kube-api-access-v7sqs\") pod \"rabbitmq-cluster-operator-index-zpkc8\" (UID: \"4a78f462-c9e1-48e7-a4d1-f459bef3ae77\") " pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:29 crc kubenswrapper[4734]: I1125 09:44:29.726146 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.115469 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-zpkc8"] Nov 25 09:44:31 crc kubenswrapper[4734]: W1125 09:44:31.123674 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a78f462_c9e1_48e7_a4d1_f459bef3ae77.slice/crio-e8078316e4eca46f74b6d0f5076058fbdf865d479f7e234ad8744773e137468d WatchSource:0}: Error finding container e8078316e4eca46f74b6d0f5076058fbdf865d479f7e234ad8744773e137468d: Status 404 returned error can't find the container with id e8078316e4eca46f74b6d0f5076058fbdf865d479f7e234ad8744773e137468d Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.454287 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-0" event={"ID":"c2c093a1-6bb8-4137-b678-69116438377c","Type":"ContainerStarted","Data":"c64cfc0384b84d1097a52109628439415d5d20f1fb7897b4892ecb7dad07c0af"} Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.455942 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-1" event={"ID":"c79a649b-3543-4b6f-8e59-caebefa31001","Type":"ContainerStarted","Data":"291daab09c18d89e71fe2443db8e4bdfa1126e4c97e20e46f92aa9e038cb63ae"} Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.457572 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-2" event={"ID":"b233a2b9-7c48-4d52-8896-4aa4b814605f","Type":"ContainerStarted","Data":"ae4a1c9587af03316a9850e219871dbfde94cde2c3d6e03f23f93a9f141fbdbd"} Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.458565 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" event={"ID":"4a78f462-c9e1-48e7-a4d1-f459bef3ae77","Type":"ContainerStarted","Data":"e8078316e4eca46f74b6d0f5076058fbdf865d479f7e234ad8744773e137468d"} Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.460337 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" event={"ID":"fd08caf7-b5ce-439d-a8df-9582554dc07a","Type":"ContainerStarted","Data":"bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e"} Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.460405 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" podUID="fd08caf7-b5ce-439d-a8df-9582554dc07a" containerName="registry-server" containerID="cri-o://bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e" gracePeriod=2 Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.491856 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" podStartSLOduration=2.631381476 podStartE2EDuration="8.491826471s" podCreationTimestamp="2025-11-25 09:44:23 +0000 UTC" firstStartedPulling="2025-11-25 09:44:24.96862866 +0000 UTC m=+987.779090654" lastFinishedPulling="2025-11-25 09:44:30.829073655 +0000 UTC m=+993.639535649" observedRunningTime="2025-11-25 09:44:31.490120851 +0000 UTC m=+994.300582855" watchObservedRunningTime="2025-11-25 09:44:31.491826471 +0000 UTC m=+994.302288465" Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.880979 4734 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.979340 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcdqw\" (UniqueName: \"kubernetes.io/projected/fd08caf7-b5ce-439d-a8df-9582554dc07a-kube-api-access-mcdqw\") pod \"fd08caf7-b5ce-439d-a8df-9582554dc07a\" (UID: \"fd08caf7-b5ce-439d-a8df-9582554dc07a\") " Nov 25 09:44:31 crc kubenswrapper[4734]: I1125 09:44:31.984738 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd08caf7-b5ce-439d-a8df-9582554dc07a-kube-api-access-mcdqw" (OuterVolumeSpecName: "kube-api-access-mcdqw") pod "fd08caf7-b5ce-439d-a8df-9582554dc07a" (UID: "fd08caf7-b5ce-439d-a8df-9582554dc07a"). InnerVolumeSpecName "kube-api-access-mcdqw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.081240 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcdqw\" (UniqueName: \"kubernetes.io/projected/fd08caf7-b5ce-439d-a8df-9582554dc07a-kube-api-access-mcdqw\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.388870 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/memcached-0" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.467737 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" event={"ID":"4a78f462-c9e1-48e7-a4d1-f459bef3ae77","Type":"ContainerStarted","Data":"c1f029af13fdb25c55ec2343f7bacf86fc02975687dafe61c49bcd5818b1aae5"} Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.469459 4734 generic.go:334] "Generic (PLEG): container finished" podID="fd08caf7-b5ce-439d-a8df-9582554dc07a" containerID="bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e" exitCode=0 Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.469883 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.470305 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" event={"ID":"fd08caf7-b5ce-439d-a8df-9582554dc07a","Type":"ContainerDied","Data":"bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e"} Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.470414 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-index-5xxjs" event={"ID":"fd08caf7-b5ce-439d-a8df-9582554dc07a","Type":"ContainerDied","Data":"ea35b1e2dad33a2df332026322ff44d7e801db000b02b5b8ae43a64a84c84711"} Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.470448 4734 scope.go:117] "RemoveContainer" containerID="bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.486476 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" podStartSLOduration=3.057049839 podStartE2EDuration="3.486455786s" podCreationTimestamp="2025-11-25 09:44:29 +0000 UTC" firstStartedPulling="2025-11-25 09:44:31.12834732 +0000 UTC m=+993.938809314" lastFinishedPulling="2025-11-25 09:44:31.557753267 +0000 UTC m=+994.368215261" observedRunningTime="2025-11-25 09:44:32.485455807 +0000 UTC m=+995.295917801" watchObservedRunningTime="2025-11-25 09:44:32.486455786 +0000 UTC m=+995.296917780" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.500531 4734 scope.go:117] "RemoveContainer" containerID="bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e" Nov 25 09:44:32 crc kubenswrapper[4734]: E1125 09:44:32.501711 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e\": container with ID starting with bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e not found: ID does not exist" containerID="bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.501763 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e"} err="failed to get container status \"bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e\": rpc error: code = NotFound desc = could not find container \"bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e\": container with ID starting with bbba2496ac5b5982994b663b86390ce4a22b9607d223a9f1a91a945415e3861e not found: ID does not exist" Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.503446 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5xxjs"] Nov 25 09:44:32 crc kubenswrapper[4734]: I1125 09:44:32.512214 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-index-5xxjs"] Nov 25 09:44:34 crc kubenswrapper[4734]: I1125 09:44:34.253692 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd08caf7-b5ce-439d-a8df-9582554dc07a" path="/var/lib/kubelet/pods/fd08caf7-b5ce-439d-a8df-9582554dc07a/volumes" Nov 25 09:44:35 crc kubenswrapper[4734]: I1125 09:44:35.495838 4734 generic.go:334] "Generic (PLEG): container finished" 
podID="c79a649b-3543-4b6f-8e59-caebefa31001" containerID="291daab09c18d89e71fe2443db8e4bdfa1126e4c97e20e46f92aa9e038cb63ae" exitCode=0 Nov 25 09:44:35 crc kubenswrapper[4734]: I1125 09:44:35.495930 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-1" event={"ID":"c79a649b-3543-4b6f-8e59-caebefa31001","Type":"ContainerDied","Data":"291daab09c18d89e71fe2443db8e4bdfa1126e4c97e20e46f92aa9e038cb63ae"} Nov 25 09:44:35 crc kubenswrapper[4734]: I1125 09:44:35.498343 4734 generic.go:334] "Generic (PLEG): container finished" podID="b233a2b9-7c48-4d52-8896-4aa4b814605f" containerID="ae4a1c9587af03316a9850e219871dbfde94cde2c3d6e03f23f93a9f141fbdbd" exitCode=0 Nov 25 09:44:35 crc kubenswrapper[4734]: I1125 09:44:35.498443 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-2" event={"ID":"b233a2b9-7c48-4d52-8896-4aa4b814605f","Type":"ContainerDied","Data":"ae4a1c9587af03316a9850e219871dbfde94cde2c3d6e03f23f93a9f141fbdbd"} Nov 25 09:44:35 crc kubenswrapper[4734]: I1125 09:44:35.501486 4734 generic.go:334] "Generic (PLEG): container finished" podID="c2c093a1-6bb8-4137-b678-69116438377c" containerID="c64cfc0384b84d1097a52109628439415d5d20f1fb7897b4892ecb7dad07c0af" exitCode=0 Nov 25 09:44:35 crc kubenswrapper[4734]: I1125 09:44:35.501530 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-0" event={"ID":"c2c093a1-6bb8-4137-b678-69116438377c","Type":"ContainerDied","Data":"c64cfc0384b84d1097a52109628439415d5d20f1fb7897b4892ecb7dad07c0af"} Nov 25 09:44:36 crc kubenswrapper[4734]: I1125 09:44:36.509172 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-2" event={"ID":"b233a2b9-7c48-4d52-8896-4aa4b814605f","Type":"ContainerStarted","Data":"afe4ed625064a3d7b0a81fb46364f2cb5ecf4f50524c1c5ebff749d341711637"} Nov 25 09:44:36 crc kubenswrapper[4734]: I1125 09:44:36.510788 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-0" event={"ID":"c2c093a1-6bb8-4137-b678-69116438377c","Type":"ContainerStarted","Data":"610e0b23071c5b259ad0320ad373fa65a7f7c883f8509a3ba1d8e35b0862ad2a"} Nov 25 09:44:36 crc kubenswrapper[4734]: I1125 09:44:36.512630 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstack-galera-1" event={"ID":"c79a649b-3543-4b6f-8e59-caebefa31001","Type":"ContainerStarted","Data":"45bf98ec2c9645c77ded39d6c28c4f659f61d9b953c37b957955e57c455dbdf3"} Nov 25 09:44:36 crc kubenswrapper[4734]: I1125 09:44:36.532537 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/openstack-galera-2" podStartSLOduration=9.499323458 podStartE2EDuration="15.532518621s" podCreationTimestamp="2025-11-25 09:44:21 +0000 UTC" firstStartedPulling="2025-11-25 09:44:24.694668694 +0000 UTC m=+987.505130698" lastFinishedPulling="2025-11-25 09:44:30.727863867 +0000 UTC m=+993.538325861" observedRunningTime="2025-11-25 09:44:36.526154395 +0000 UTC m=+999.336616389" watchObservedRunningTime="2025-11-25 09:44:36.532518621 +0000 UTC m=+999.342980615" Nov 25 09:44:36 crc kubenswrapper[4734]: I1125 09:44:36.546549 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/openstack-galera-0" podStartSLOduration=9.552747968 podStartE2EDuration="15.54652882s" podCreationTimestamp="2025-11-25 09:44:21 +0000 UTC" firstStartedPulling="2025-11-25 09:44:24.731474559 +0000 UTC m=+987.541936553" 
lastFinishedPulling="2025-11-25 09:44:30.725255411 +0000 UTC m=+993.535717405" observedRunningTime="2025-11-25 09:44:36.543796971 +0000 UTC m=+999.354258975" watchObservedRunningTime="2025-11-25 09:44:36.54652882 +0000 UTC m=+999.356990824" Nov 25 09:44:36 crc kubenswrapper[4734]: I1125 09:44:36.562626 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/openstack-galera-1" podStartSLOduration=9.736507219 podStartE2EDuration="15.56260424s" podCreationTimestamp="2025-11-25 09:44:21 +0000 UTC" firstStartedPulling="2025-11-25 09:44:24.833763539 +0000 UTC m=+987.644225543" lastFinishedPulling="2025-11-25 09:44:30.65986057 +0000 UTC m=+993.470322564" observedRunningTime="2025-11-25 09:44:36.559125358 +0000 UTC m=+999.369587352" watchObservedRunningTime="2025-11-25 09:44:36.56260424 +0000 UTC m=+999.373066234" Nov 25 09:44:39 crc kubenswrapper[4734]: I1125 09:44:39.726959 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:39 crc kubenswrapper[4734]: I1125 09:44:39.727249 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:39 crc kubenswrapper[4734]: I1125 09:44:39.752767 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:40 crc kubenswrapper[4734]: I1125 09:44:40.563503 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/rabbitmq-cluster-operator-index-zpkc8" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.457192 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g"] Nov 25 09:44:41 crc kubenswrapper[4734]: E1125 09:44:41.458024 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd08caf7-b5ce-439d-a8df-9582554dc07a" containerName="registry-server" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.458048 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd08caf7-b5ce-439d-a8df-9582554dc07a" containerName="registry-server" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.458243 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd08caf7-b5ce-439d-a8df-9582554dc07a" containerName="registry-server" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.459497 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.461492 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9j88k" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.470025 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g"] Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.519124 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.519183 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.519248 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpsxf\" (UniqueName: \"kubernetes.io/projected/2144a9a2-0e34-4398-9789-46f70449d569-kube-api-access-xpsxf\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.620380 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpsxf\" (UniqueName: \"kubernetes.io/projected/2144a9a2-0e34-4398-9789-46f70449d569-kube-api-access-xpsxf\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.620489 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.620524 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.620993 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-util\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.621015 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-bundle\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.640409 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpsxf\" (UniqueName: \"kubernetes.io/projected/2144a9a2-0e34-4398-9789-46f70449d569-kube-api-access-xpsxf\") pod \"9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:41 crc kubenswrapper[4734]: I1125 09:44:41.822751 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:42 crc kubenswrapper[4734]: I1125 09:44:42.229769 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g"] Nov 25 09:44:42 crc kubenswrapper[4734]: W1125 09:44:42.245835 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2144a9a2_0e34_4398_9789_46f70449d569.slice/crio-9c844eca41fb9e76dda6e8b24d60ec02163efd274421bba3ebaf22039d435072 WatchSource:0}: Error finding container 9c844eca41fb9e76dda6e8b24d60ec02163efd274421bba3ebaf22039d435072: Status 404 returned error can't find the container with id 9c844eca41fb9e76dda6e8b24d60ec02163efd274421bba3ebaf22039d435072 Nov 25 09:44:42 crc kubenswrapper[4734]: I1125 09:44:42.550859 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" event={"ID":"2144a9a2-0e34-4398-9789-46f70449d569","Type":"ContainerStarted","Data":"9c844eca41fb9e76dda6e8b24d60ec02163efd274421bba3ebaf22039d435072"} Nov 25 09:44:43 crc kubenswrapper[4734]: I1125 09:44:43.233208 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:43 crc kubenswrapper[4734]: I1125 09:44:43.233270 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:43 crc kubenswrapper[4734]: I1125 09:44:43.248747 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:43 crc kubenswrapper[4734]: I1125 09:44:43.248803 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:44:43 crc kubenswrapper[4734]: I1125 09:44:43.256176 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:43 crc kubenswrapper[4734]: I1125 09:44:43.256551 4734 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:44 crc kubenswrapper[4734]: I1125 09:44:44.562433 4734 generic.go:334] "Generic (PLEG): container finished" podID="2144a9a2-0e34-4398-9789-46f70449d569" containerID="c426541b421dd18121f10fb5bae9c64110644db2477574cfd1099a2d71913b44" exitCode=0 Nov 25 09:44:44 crc kubenswrapper[4734]: I1125 09:44:44.562474 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" event={"ID":"2144a9a2-0e34-4398-9789-46f70449d569","Type":"ContainerDied","Data":"c426541b421dd18121f10fb5bae9c64110644db2477574cfd1099a2d71913b44"} Nov 25 09:44:45 crc kubenswrapper[4734]: I1125 09:44:45.403730 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:45 crc kubenswrapper[4734]: I1125 09:44:45.475307 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/openstack-galera-2" Nov 25 09:44:45 crc kubenswrapper[4734]: E1125 09:44:45.679271 4734 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.5:38924->38.102.83.5:36341: write tcp 192.168.126.11:10250->192.168.126.11:37464: write: broken pipe Nov 25 09:44:46 crc kubenswrapper[4734]: I1125 09:44:46.575274 4734 generic.go:334] "Generic (PLEG): container finished" podID="2144a9a2-0e34-4398-9789-46f70449d569" containerID="df672a238953dd93bcc08844bb00179052172a4a0c7ec55c7a27c9ebb361ab20" exitCode=0 Nov 25 09:44:46 crc kubenswrapper[4734]: I1125 09:44:46.575388 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" event={"ID":"2144a9a2-0e34-4398-9789-46f70449d569","Type":"ContainerDied","Data":"df672a238953dd93bcc08844bb00179052172a4a0c7ec55c7a27c9ebb361ab20"} Nov 25 09:44:47 crc kubenswrapper[4734]: I1125 09:44:47.582770 4734 generic.go:334] "Generic (PLEG): container finished" podID="2144a9a2-0e34-4398-9789-46f70449d569" containerID="aa24ea9598681598f5722d639b12ee6942aaa21f49a765cb25308884e82f19e7" exitCode=0 Nov 25 09:44:47 crc kubenswrapper[4734]: I1125 09:44:47.582811 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" event={"ID":"2144a9a2-0e34-4398-9789-46f70449d569","Type":"ContainerDied","Data":"aa24ea9598681598f5722d639b12ee6942aaa21f49a765cb25308884e82f19e7"} Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.854119 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.926346 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpsxf\" (UniqueName: \"kubernetes.io/projected/2144a9a2-0e34-4398-9789-46f70449d569-kube-api-access-xpsxf\") pod \"2144a9a2-0e34-4398-9789-46f70449d569\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.926415 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-bundle\") pod \"2144a9a2-0e34-4398-9789-46f70449d569\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.926561 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-util\") pod \"2144a9a2-0e34-4398-9789-46f70449d569\" (UID: \"2144a9a2-0e34-4398-9789-46f70449d569\") " Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.927900 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-bundle" (OuterVolumeSpecName: "bundle") pod "2144a9a2-0e34-4398-9789-46f70449d569" (UID: "2144a9a2-0e34-4398-9789-46f70449d569"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.933263 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2144a9a2-0e34-4398-9789-46f70449d569-kube-api-access-xpsxf" (OuterVolumeSpecName: "kube-api-access-xpsxf") pod "2144a9a2-0e34-4398-9789-46f70449d569" (UID: "2144a9a2-0e34-4398-9789-46f70449d569"). InnerVolumeSpecName "kube-api-access-xpsxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:48 crc kubenswrapper[4734]: I1125 09:44:48.944531 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-util" (OuterVolumeSpecName: "util") pod "2144a9a2-0e34-4398-9789-46f70449d569" (UID: "2144a9a2-0e34-4398-9789-46f70449d569"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:49 crc kubenswrapper[4734]: I1125 09:44:49.028994 4734 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:49 crc kubenswrapper[4734]: I1125 09:44:49.029039 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpsxf\" (UniqueName: \"kubernetes.io/projected/2144a9a2-0e34-4398-9789-46f70449d569-kube-api-access-xpsxf\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:49 crc kubenswrapper[4734]: I1125 09:44:49.029053 4734 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2144a9a2-0e34-4398-9789-46f70449d569-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:49 crc kubenswrapper[4734]: I1125 09:44:49.595997 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" event={"ID":"2144a9a2-0e34-4398-9789-46f70449d569","Type":"ContainerDied","Data":"9c844eca41fb9e76dda6e8b24d60ec02163efd274421bba3ebaf22039d435072"} Nov 25 09:44:49 crc kubenswrapper[4734]: I1125 09:44:49.596033 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c844eca41fb9e76dda6e8b24d60ec02163efd274421bba3ebaf22039d435072" Nov 25 09:44:49 crc kubenswrapper[4734]: I1125 09:44:49.596039 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9704761d240e56fb98655ffd81084895b33a73ec711f4dcdef0450e5906vq4g" Nov 25 09:44:50 crc kubenswrapper[4734]: I1125 09:44:50.699140 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:44:50 crc kubenswrapper[4734]: I1125 09:44:50.699280 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:44:50 crc kubenswrapper[4734]: I1125 09:44:50.699363 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:44:50 crc kubenswrapper[4734]: I1125 09:44:50.700374 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3b8d398052ed40cef3b469389e339a6739d1c3bd141a5a0198cc0270e0a8927e"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:44:50 crc kubenswrapper[4734]: I1125 09:44:50.700493 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://3b8d398052ed40cef3b469389e339a6739d1c3bd141a5a0198cc0270e0a8927e" gracePeriod=600 Nov 25 09:44:51 crc kubenswrapper[4734]: I1125 09:44:51.611159 4734 generic.go:334] "Generic (PLEG): container finished" 
podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="3b8d398052ed40cef3b469389e339a6739d1c3bd141a5a0198cc0270e0a8927e" exitCode=0 Nov 25 09:44:51 crc kubenswrapper[4734]: I1125 09:44:51.611224 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"3b8d398052ed40cef3b469389e339a6739d1c3bd141a5a0198cc0270e0a8927e"} Nov 25 09:44:51 crc kubenswrapper[4734]: I1125 09:44:51.611513 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"b897de4eab07f171dfe6b1c309559bbe3bc70bbec2b52f3e83fb0315718594f5"} Nov 25 09:44:51 crc kubenswrapper[4734]: I1125 09:44:51.611537 4734 scope.go:117] "RemoveContainer" containerID="0a9c180e121b5cb8f4c6eca924f66d6e13dbe5f0459c72e12dd199f92062a51f" Nov 25 09:44:53 crc kubenswrapper[4734]: I1125 09:44:53.314562 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="keystone-kuttl-tests/openstack-galera-2" podUID="b233a2b9-7c48-4d52-8896-4aa4b814605f" containerName="galera" probeResult="failure" output=< Nov 25 09:44:53 crc kubenswrapper[4734]: wsrep_local_state_comment (Donor/Desynced) differs from Synced Nov 25 09:44:53 crc kubenswrapper[4734]: > Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.569539 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k"] Nov 25 09:44:55 crc kubenswrapper[4734]: E1125 09:44:55.570041 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="extract" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.570053 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="extract" Nov 25 09:44:55 crc kubenswrapper[4734]: E1125 09:44:55.570066 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="pull" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.570073 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="pull" Nov 25 09:44:55 crc kubenswrapper[4734]: E1125 09:44:55.570097 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="util" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.570106 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="util" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.570209 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="2144a9a2-0e34-4398-9789-46f70449d569" containerName="extract" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.570634 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.573765 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-p2xtm" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.613186 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k"] Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.665378 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76fm8\" (UniqueName: \"kubernetes.io/projected/d552db43-2924-4931-a15d-f8803531210f-kube-api-access-76fm8\") pod \"rabbitmq-cluster-operator-779fc9694b-7tv4k\" (UID: \"d552db43-2924-4931-a15d-f8803531210f\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.767517 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76fm8\" (UniqueName: \"kubernetes.io/projected/d552db43-2924-4931-a15d-f8803531210f-kube-api-access-76fm8\") pod \"rabbitmq-cluster-operator-779fc9694b-7tv4k\" (UID: \"d552db43-2924-4931-a15d-f8803531210f\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.792950 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76fm8\" (UniqueName: \"kubernetes.io/projected/d552db43-2924-4931-a15d-f8803531210f-kube-api-access-76fm8\") pod \"rabbitmq-cluster-operator-779fc9694b-7tv4k\" (UID: \"d552db43-2924-4931-a15d-f8803531210f\") " pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" Nov 25 09:44:55 crc kubenswrapper[4734]: I1125 09:44:55.888939 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" Nov 25 09:44:56 crc kubenswrapper[4734]: I1125 09:44:56.332286 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k"] Nov 25 09:44:56 crc kubenswrapper[4734]: I1125 09:44:56.651273 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" event={"ID":"d552db43-2924-4931-a15d-f8803531210f","Type":"ContainerStarted","Data":"3bddd68624965f9b9375b96fa8a810f646907afff4402b195b22566a2d78d36d"} Nov 25 09:44:58 crc kubenswrapper[4734]: I1125 09:44:58.280888 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:58 crc kubenswrapper[4734]: I1125 09:44:58.354144 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/openstack-galera-1" Nov 25 09:44:59 crc kubenswrapper[4734]: I1125 09:44:59.976238 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.053394 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/openstack-galera-0" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.132016 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265"] Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.132828 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.135324 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.135513 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.149100 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265"] Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.242232 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f3bf2393-e9a6-4567-b77a-4913f30ce267-secret-volume\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.242287 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chhfg\" (UniqueName: \"kubernetes.io/projected/f3bf2393-e9a6-4567-b77a-4913f30ce267-kube-api-access-chhfg\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.242377 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f3bf2393-e9a6-4567-b77a-4913f30ce267-config-volume\") pod 
\"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.343767 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f3bf2393-e9a6-4567-b77a-4913f30ce267-config-volume\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.344250 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f3bf2393-e9a6-4567-b77a-4913f30ce267-secret-volume\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.344281 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chhfg\" (UniqueName: \"kubernetes.io/projected/f3bf2393-e9a6-4567-b77a-4913f30ce267-kube-api-access-chhfg\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.347478 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.355480 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f3bf2393-e9a6-4567-b77a-4913f30ce267-config-volume\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.355917 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f3bf2393-e9a6-4567-b77a-4913f30ce267-secret-volume\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.365661 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chhfg\" (UniqueName: \"kubernetes.io/projected/f3bf2393-e9a6-4567-b77a-4913f30ce267-kube-api-access-chhfg\") pod \"collect-profiles-29401065-s4265\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.460391 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.468301 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.680963 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" event={"ID":"d552db43-2924-4931-a15d-f8803531210f","Type":"ContainerStarted","Data":"9f7dc98b5bceffa7f3567dc1c51ef65556d04b72bb95638f87897d1f47192b6a"} Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.705750 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" podStartSLOduration=2.227327777 podStartE2EDuration="5.705731173s" podCreationTimestamp="2025-11-25 09:44:55 +0000 UTC" firstStartedPulling="2025-11-25 09:44:56.359138237 +0000 UTC m=+1019.169600251" lastFinishedPulling="2025-11-25 09:44:59.837541653 +0000 UTC m=+1022.648003647" observedRunningTime="2025-11-25 09:45:00.699538632 +0000 UTC m=+1023.510000626" watchObservedRunningTime="2025-11-25 09:45:00.705731173 +0000 UTC m=+1023.516193167" Nov 25 09:45:00 crc kubenswrapper[4734]: I1125 09:45:00.896759 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265"] Nov 25 09:45:01 crc kubenswrapper[4734]: I1125 09:45:01.688151 4734 generic.go:334] "Generic (PLEG): container finished" podID="f3bf2393-e9a6-4567-b77a-4913f30ce267" containerID="6aae01c85a9d58974a5af42b06d86c7d61d70f57410a3f566244349c3d3b928f" exitCode=0 Nov 25 09:45:01 crc kubenswrapper[4734]: I1125 09:45:01.688565 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" event={"ID":"f3bf2393-e9a6-4567-b77a-4913f30ce267","Type":"ContainerDied","Data":"6aae01c85a9d58974a5af42b06d86c7d61d70f57410a3f566244349c3d3b928f"} Nov 25 09:45:01 crc kubenswrapper[4734]: I1125 09:45:01.688609 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" event={"ID":"f3bf2393-e9a6-4567-b77a-4913f30ce267","Type":"ContainerStarted","Data":"eabb02abb403625daf2fc027953324ea1d213f95d6bd6e493c008dd887b633c9"} Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.060030 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.185936 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chhfg\" (UniqueName: \"kubernetes.io/projected/f3bf2393-e9a6-4567-b77a-4913f30ce267-kube-api-access-chhfg\") pod \"f3bf2393-e9a6-4567-b77a-4913f30ce267\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.185995 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f3bf2393-e9a6-4567-b77a-4913f30ce267-config-volume\") pod \"f3bf2393-e9a6-4567-b77a-4913f30ce267\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.186017 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f3bf2393-e9a6-4567-b77a-4913f30ce267-secret-volume\") pod \"f3bf2393-e9a6-4567-b77a-4913f30ce267\" (UID: \"f3bf2393-e9a6-4567-b77a-4913f30ce267\") " Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.186713 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3bf2393-e9a6-4567-b77a-4913f30ce267-config-volume" (OuterVolumeSpecName: "config-volume") pod "f3bf2393-e9a6-4567-b77a-4913f30ce267" (UID: "f3bf2393-e9a6-4567-b77a-4913f30ce267"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.195835 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3bf2393-e9a6-4567-b77a-4913f30ce267-kube-api-access-chhfg" (OuterVolumeSpecName: "kube-api-access-chhfg") pod "f3bf2393-e9a6-4567-b77a-4913f30ce267" (UID: "f3bf2393-e9a6-4567-b77a-4913f30ce267"). InnerVolumeSpecName "kube-api-access-chhfg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.196162 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3bf2393-e9a6-4567-b77a-4913f30ce267-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f3bf2393-e9a6-4567-b77a-4913f30ce267" (UID: "f3bf2393-e9a6-4567-b77a-4913f30ce267"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.288008 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chhfg\" (UniqueName: \"kubernetes.io/projected/f3bf2393-e9a6-4567-b77a-4913f30ce267-kube-api-access-chhfg\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.288480 4734 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f3bf2393-e9a6-4567-b77a-4913f30ce267-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.288496 4734 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f3bf2393-e9a6-4567-b77a-4913f30ce267-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.702739 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" event={"ID":"f3bf2393-e9a6-4567-b77a-4913f30ce267","Type":"ContainerDied","Data":"eabb02abb403625daf2fc027953324ea1d213f95d6bd6e493c008dd887b633c9"} Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.702774 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-s4265" Nov 25 09:45:03 crc kubenswrapper[4734]: I1125 09:45:03.702779 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eabb02abb403625daf2fc027953324ea1d213f95d6bd6e493c008dd887b633c9" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.531990 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/rabbitmq-server-0"] Nov 25 09:45:04 crc kubenswrapper[4734]: E1125 09:45:04.532333 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3bf2393-e9a6-4567-b77a-4913f30ce267" containerName="collect-profiles" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.532348 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3bf2393-e9a6-4567-b77a-4913f30ce267" containerName="collect-profiles" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.532481 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3bf2393-e9a6-4567-b77a-4913f30ce267" containerName="collect-profiles" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.533220 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.535809 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"rabbitmq-plugins-conf" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.535828 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"rabbitmq-server-conf" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.535863 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"rabbitmq-erlang-cookie" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.535809 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"rabbitmq-default-user" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.536006 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"rabbitmq-server-dockercfg-k5qzt" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.543492 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/rabbitmq-server-0"] Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605514 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605584 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36037636-1752-434b-92b7-091efbfd9005-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605616 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605678 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36037636-1752-434b-92b7-091efbfd9005-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605703 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbrdr\" (UniqueName: \"kubernetes.io/projected/36037636-1752-434b-92b7-091efbfd9005-kube-api-access-hbrdr\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605761 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " 
pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605803 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36037636-1752-434b-92b7-091efbfd9005-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.605830 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.706983 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707057 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707080 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36037636-1752-434b-92b7-091efbfd9005-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707117 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707142 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36037636-1752-434b-92b7-091efbfd9005-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707167 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbrdr\" (UniqueName: \"kubernetes.io/projected/36037636-1752-434b-92b7-091efbfd9005-kube-api-access-hbrdr\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707195 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " 
pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707229 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36037636-1752-434b-92b7-091efbfd9005-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.707524 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.708135 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/36037636-1752-434b-92b7-091efbfd9005-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.708247 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.709998 4734 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.710030 4734 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/db730d9866dab02e53b3e383538a6ec5e1980785a3ffadedd64adfcf8591cac8/globalmount\"" pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.711968 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/36037636-1752-434b-92b7-091efbfd9005-pod-info\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.712549 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/36037636-1752-434b-92b7-091efbfd9005-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.712931 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/36037636-1752-434b-92b7-091efbfd9005-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.727356 4734 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hbrdr\" (UniqueName: \"kubernetes.io/projected/36037636-1752-434b-92b7-091efbfd9005-kube-api-access-hbrdr\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.742535 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2acc7db1-75ef-4c45-bb1d-b4253c5bdaae\") pod \"rabbitmq-server-0\" (UID: \"36037636-1752-434b-92b7-091efbfd9005\") " pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:04 crc kubenswrapper[4734]: I1125 09:45:04.850438 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:45:05 crc kubenswrapper[4734]: I1125 09:45:05.271157 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/rabbitmq-server-0"] Nov 25 09:45:05 crc kubenswrapper[4734]: W1125 09:45:05.277113 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36037636_1752_434b_92b7_091efbfd9005.slice/crio-88f9d106be16ef44cbd535840dd5ac493f8d0b8a75602b4dd6317b571af6e5e3 WatchSource:0}: Error finding container 88f9d106be16ef44cbd535840dd5ac493f8d0b8a75602b4dd6317b571af6e5e3: Status 404 returned error can't find the container with id 88f9d106be16ef44cbd535840dd5ac493f8d0b8a75602b4dd6317b571af6e5e3 Nov 25 09:45:05 crc kubenswrapper[4734]: I1125 09:45:05.714768 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/rabbitmq-server-0" event={"ID":"36037636-1752-434b-92b7-091efbfd9005","Type":"ContainerStarted","Data":"88f9d106be16ef44cbd535840dd5ac493f8d0b8a75602b4dd6317b571af6e5e3"} Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.204679 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-t6pq7"] Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.205477 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.208015 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-lppsb" Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.211946 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-t6pq7"] Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.327718 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdxh6\" (UniqueName: \"kubernetes.io/projected/9a39071e-e318-43fb-9b8b-8172f25e4087-kube-api-access-gdxh6\") pod \"keystone-operator-index-t6pq7\" (UID: \"9a39071e-e318-43fb-9b8b-8172f25e4087\") " pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.428650 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdxh6\" (UniqueName: \"kubernetes.io/projected/9a39071e-e318-43fb-9b8b-8172f25e4087-kube-api-access-gdxh6\") pod \"keystone-operator-index-t6pq7\" (UID: \"9a39071e-e318-43fb-9b8b-8172f25e4087\") " pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.450427 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdxh6\" (UniqueName: \"kubernetes.io/projected/9a39071e-e318-43fb-9b8b-8172f25e4087-kube-api-access-gdxh6\") pod \"keystone-operator-index-t6pq7\" (UID: \"9a39071e-e318-43fb-9b8b-8172f25e4087\") " pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.530721 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:06 crc kubenswrapper[4734]: I1125 09:45:06.834984 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-t6pq7"] Nov 25 09:45:06 crc kubenswrapper[4734]: W1125 09:45:06.871439 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a39071e_e318_43fb_9b8b_8172f25e4087.slice/crio-d9720529490fbb36419dc55cf735ec9912ab6f6da5e7b3b37ce49b7a00b89d4c WatchSource:0}: Error finding container d9720529490fbb36419dc55cf735ec9912ab6f6da5e7b3b37ce49b7a00b89d4c: Status 404 returned error can't find the container with id d9720529490fbb36419dc55cf735ec9912ab6f6da5e7b3b37ce49b7a00b89d4c Nov 25 09:45:07 crc kubenswrapper[4734]: I1125 09:45:07.741846 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-t6pq7" event={"ID":"9a39071e-e318-43fb-9b8b-8172f25e4087","Type":"ContainerStarted","Data":"d9720529490fbb36419dc55cf735ec9912ab6f6da5e7b3b37ce49b7a00b89d4c"} Nov 25 09:45:10 crc kubenswrapper[4734]: I1125 09:45:10.602469 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-index-t6pq7"] Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.208242 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-index-vf5n5"] Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.209024 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.216653 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-vf5n5"] Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.295884 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xp2xq\" (UniqueName: \"kubernetes.io/projected/4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b-kube-api-access-xp2xq\") pod \"keystone-operator-index-vf5n5\" (UID: \"4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b\") " pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.398296 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xp2xq\" (UniqueName: \"kubernetes.io/projected/4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b-kube-api-access-xp2xq\") pod \"keystone-operator-index-vf5n5\" (UID: \"4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b\") " pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.419347 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xp2xq\" (UniqueName: \"kubernetes.io/projected/4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b-kube-api-access-xp2xq\") pod \"keystone-operator-index-vf5n5\" (UID: \"4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b\") " pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:11 crc kubenswrapper[4734]: I1125 09:45:11.540453 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.480519 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-index-vf5n5"] Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.771850 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-t6pq7" event={"ID":"9a39071e-e318-43fb-9b8b-8172f25e4087","Type":"ContainerStarted","Data":"1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177"} Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.771924 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/keystone-operator-index-t6pq7" podUID="9a39071e-e318-43fb-9b8b-8172f25e4087" containerName="registry-server" containerID="cri-o://1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177" gracePeriod=2 Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.774068 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-vf5n5" event={"ID":"4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b","Type":"ContainerStarted","Data":"6268b2d89bc36b8c6c54d164e1aa9f84844c94353c8d68ddbbc1f7726a555d7a"} Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.774328 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-vf5n5" event={"ID":"4e0cde2c-ad49-41db-8ec7-a3c02a26fb3b","Type":"ContainerStarted","Data":"5f8d311e6b5b85df43d4d383020d3e00c32e591af4c10b5bac1b874f57a9e548"} Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.791147 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-t6pq7" podStartSLOduration=1.309566422 podStartE2EDuration="6.791123174s" podCreationTimestamp="2025-11-25 09:45:06 +0000 UTC" 
firstStartedPulling="2025-11-25 09:45:06.874599951 +0000 UTC m=+1029.685061945" lastFinishedPulling="2025-11-25 09:45:12.356156703 +0000 UTC m=+1035.166618697" observedRunningTime="2025-11-25 09:45:12.787047605 +0000 UTC m=+1035.597509599" watchObservedRunningTime="2025-11-25 09:45:12.791123174 +0000 UTC m=+1035.601585178" Nov 25 09:45:12 crc kubenswrapper[4734]: I1125 09:45:12.800717 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-index-vf5n5" podStartSLOduration=1.746649855 podStartE2EDuration="1.800697244s" podCreationTimestamp="2025-11-25 09:45:11 +0000 UTC" firstStartedPulling="2025-11-25 09:45:12.492408805 +0000 UTC m=+1035.302870799" lastFinishedPulling="2025-11-25 09:45:12.546456194 +0000 UTC m=+1035.356918188" observedRunningTime="2025-11-25 09:45:12.799850349 +0000 UTC m=+1035.610312353" watchObservedRunningTime="2025-11-25 09:45:12.800697244 +0000 UTC m=+1035.611159238" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.287874 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.429165 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdxh6\" (UniqueName: \"kubernetes.io/projected/9a39071e-e318-43fb-9b8b-8172f25e4087-kube-api-access-gdxh6\") pod \"9a39071e-e318-43fb-9b8b-8172f25e4087\" (UID: \"9a39071e-e318-43fb-9b8b-8172f25e4087\") " Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.434771 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a39071e-e318-43fb-9b8b-8172f25e4087-kube-api-access-gdxh6" (OuterVolumeSpecName: "kube-api-access-gdxh6") pod "9a39071e-e318-43fb-9b8b-8172f25e4087" (UID: "9a39071e-e318-43fb-9b8b-8172f25e4087"). InnerVolumeSpecName "kube-api-access-gdxh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.530972 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdxh6\" (UniqueName: \"kubernetes.io/projected/9a39071e-e318-43fb-9b8b-8172f25e4087-kube-api-access-gdxh6\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.782247 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/rabbitmq-server-0" event={"ID":"36037636-1752-434b-92b7-091efbfd9005","Type":"ContainerStarted","Data":"87f515c06f302640c39cb66e613672691572d7957c557a2b01a87da115716d25"} Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.783658 4734 generic.go:334] "Generic (PLEG): container finished" podID="9a39071e-e318-43fb-9b8b-8172f25e4087" containerID="1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177" exitCode=0 Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.783956 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-index-t6pq7" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.784480 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-t6pq7" event={"ID":"9a39071e-e318-43fb-9b8b-8172f25e4087","Type":"ContainerDied","Data":"1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177"} Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.784521 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-index-t6pq7" event={"ID":"9a39071e-e318-43fb-9b8b-8172f25e4087","Type":"ContainerDied","Data":"d9720529490fbb36419dc55cf735ec9912ab6f6da5e7b3b37ce49b7a00b89d4c"} Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.784542 4734 scope.go:117] "RemoveContainer" containerID="1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.805301 4734 scope.go:117] "RemoveContainer" containerID="1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177" Nov 25 09:45:13 crc kubenswrapper[4734]: E1125 09:45:13.805867 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177\": container with ID starting with 1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177 not found: ID does not exist" containerID="1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.805955 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177"} err="failed to get container status \"1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177\": rpc error: code = NotFound desc = could not find container \"1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177\": container with ID starting with 1f684ac8c3eb5f4735b192cd9250dedc7f7b79f13265d14f9ebb7127faabc177 not found: ID does not exist" Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.822254 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/keystone-operator-index-t6pq7"] Nov 25 09:45:13 crc kubenswrapper[4734]: I1125 09:45:13.825582 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/keystone-operator-index-t6pq7"] Nov 25 09:45:14 crc kubenswrapper[4734]: I1125 09:45:14.255361 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a39071e-e318-43fb-9b8b-8172f25e4087" path="/var/lib/kubelet/pods/9a39071e-e318-43fb-9b8b-8172f25e4087/volumes" Nov 25 09:45:21 crc kubenswrapper[4734]: I1125 09:45:21.541059 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:21 crc kubenswrapper[4734]: I1125 09:45:21.542000 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:21 crc kubenswrapper[4734]: I1125 09:45:21.568478 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:21 crc kubenswrapper[4734]: I1125 09:45:21.858194 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-index-vf5n5" Nov 25 09:45:25 crc kubenswrapper[4734]: 
I1125 09:45:25.836075 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc"] Nov 25 09:45:25 crc kubenswrapper[4734]: E1125 09:45:25.836558 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a39071e-e318-43fb-9b8b-8172f25e4087" containerName="registry-server" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.836569 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a39071e-e318-43fb-9b8b-8172f25e4087" containerName="registry-server" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.836698 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a39071e-e318-43fb-9b8b-8172f25e4087" containerName="registry-server" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.837477 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.839273 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9j88k" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.857825 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc"] Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.894095 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-bundle\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.894175 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-util\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.894223 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhc5d\" (UniqueName: \"kubernetes.io/projected/f8708ff3-d7c1-4565-8300-4742a87d7c84-kube-api-access-hhc5d\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.995999 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-bundle\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.996047 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-util\") pod 
\"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.996079 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhc5d\" (UniqueName: \"kubernetes.io/projected/f8708ff3-d7c1-4565-8300-4742a87d7c84-kube-api-access-hhc5d\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.996837 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-util\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:25 crc kubenswrapper[4734]: I1125 09:45:25.997606 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-bundle\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:26 crc kubenswrapper[4734]: I1125 09:45:26.013104 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhc5d\" (UniqueName: \"kubernetes.io/projected/f8708ff3-d7c1-4565-8300-4742a87d7c84-kube-api-access-hhc5d\") pod \"6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:26 crc kubenswrapper[4734]: I1125 09:45:26.152511 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:26 crc kubenswrapper[4734]: I1125 09:45:26.353556 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc"] Nov 25 09:45:26 crc kubenswrapper[4734]: W1125 09:45:26.358336 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8708ff3_d7c1_4565_8300_4742a87d7c84.slice/crio-6d6609aa49c7c8f12b132845e852ed997928556682e94df7afc123fa7666c04b WatchSource:0}: Error finding container 6d6609aa49c7c8f12b132845e852ed997928556682e94df7afc123fa7666c04b: Status 404 returned error can't find the container with id 6d6609aa49c7c8f12b132845e852ed997928556682e94df7afc123fa7666c04b Nov 25 09:45:26 crc kubenswrapper[4734]: I1125 09:45:26.871269 4734 generic.go:334] "Generic (PLEG): container finished" podID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerID="8d08de8fe1620bae018bb278eff69de04b3c493c21aa9bf8728c19f2c47d0eed" exitCode=0 Nov 25 09:45:26 crc kubenswrapper[4734]: I1125 09:45:26.871322 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" event={"ID":"f8708ff3-d7c1-4565-8300-4742a87d7c84","Type":"ContainerDied","Data":"8d08de8fe1620bae018bb278eff69de04b3c493c21aa9bf8728c19f2c47d0eed"} Nov 25 09:45:26 crc kubenswrapper[4734]: I1125 09:45:26.871357 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" event={"ID":"f8708ff3-d7c1-4565-8300-4742a87d7c84","Type":"ContainerStarted","Data":"6d6609aa49c7c8f12b132845e852ed997928556682e94df7afc123fa7666c04b"} Nov 25 09:45:27 crc kubenswrapper[4734]: I1125 09:45:27.880781 4734 generic.go:334] "Generic (PLEG): container finished" podID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerID="26699024a60a00aa0278c4d438f85bc6fa403ec69feff784bb5c45f6156f494c" exitCode=0 Nov 25 09:45:27 crc kubenswrapper[4734]: I1125 09:45:27.880835 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" event={"ID":"f8708ff3-d7c1-4565-8300-4742a87d7c84","Type":"ContainerDied","Data":"26699024a60a00aa0278c4d438f85bc6fa403ec69feff784bb5c45f6156f494c"} Nov 25 09:45:28 crc kubenswrapper[4734]: I1125 09:45:28.888562 4734 generic.go:334] "Generic (PLEG): container finished" podID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerID="18624c365a3c292f6f762f8f0ef297b8d3ee0847d9b717d40e1622ac0507d9c5" exitCode=0 Nov 25 09:45:28 crc kubenswrapper[4734]: I1125 09:45:28.888663 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" event={"ID":"f8708ff3-d7c1-4565-8300-4742a87d7c84","Type":"ContainerDied","Data":"18624c365a3c292f6f762f8f0ef297b8d3ee0847d9b717d40e1622ac0507d9c5"} Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.146906 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.274110 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhc5d\" (UniqueName: \"kubernetes.io/projected/f8708ff3-d7c1-4565-8300-4742a87d7c84-kube-api-access-hhc5d\") pod \"f8708ff3-d7c1-4565-8300-4742a87d7c84\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.274262 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-bundle\") pod \"f8708ff3-d7c1-4565-8300-4742a87d7c84\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.274345 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-util\") pod \"f8708ff3-d7c1-4565-8300-4742a87d7c84\" (UID: \"f8708ff3-d7c1-4565-8300-4742a87d7c84\") " Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.276055 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-bundle" (OuterVolumeSpecName: "bundle") pod "f8708ff3-d7c1-4565-8300-4742a87d7c84" (UID: "f8708ff3-d7c1-4565-8300-4742a87d7c84"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.279605 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8708ff3-d7c1-4565-8300-4742a87d7c84-kube-api-access-hhc5d" (OuterVolumeSpecName: "kube-api-access-hhc5d") pod "f8708ff3-d7c1-4565-8300-4742a87d7c84" (UID: "f8708ff3-d7c1-4565-8300-4742a87d7c84"). InnerVolumeSpecName "kube-api-access-hhc5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.289037 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-util" (OuterVolumeSpecName: "util") pod "f8708ff3-d7c1-4565-8300-4742a87d7c84" (UID: "f8708ff3-d7c1-4565-8300-4742a87d7c84"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.375674 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhc5d\" (UniqueName: \"kubernetes.io/projected/f8708ff3-d7c1-4565-8300-4742a87d7c84-kube-api-access-hhc5d\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.375747 4734 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.375758 4734 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f8708ff3-d7c1-4565-8300-4742a87d7c84-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.903353 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" event={"ID":"f8708ff3-d7c1-4565-8300-4742a87d7c84","Type":"ContainerDied","Data":"6d6609aa49c7c8f12b132845e852ed997928556682e94df7afc123fa7666c04b"} Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.903391 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d6609aa49c7c8f12b132845e852ed997928556682e94df7afc123fa7666c04b" Nov 25 09:45:30 crc kubenswrapper[4734]: I1125 09:45:30.903417 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/6bc881fae556025ca36ba7bf9e8c0526ac6204c748e79091b6b4c25fe3h9xkc" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.169106 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"] Nov 25 09:45:37 crc kubenswrapper[4734]: E1125 09:45:37.169961 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="pull" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.169979 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="pull" Nov 25 09:45:37 crc kubenswrapper[4734]: E1125 09:45:37.169990 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="extract" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.169997 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="extract" Nov 25 09:45:37 crc kubenswrapper[4734]: E1125 09:45:37.170014 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="util" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.170024 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="util" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.170173 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8708ff3-d7c1-4565-8300-4742a87d7c84" containerName="extract" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.170751 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.172969 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-tj5px" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.173604 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.189024 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"] Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.265419 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeac7687-6578-41aa-99da-c576d6162d9e-apiservice-cert\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.265524 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eeac7687-6578-41aa-99da-c576d6162d9e-webhook-cert\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.265580 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97gjn\" (UniqueName: \"kubernetes.io/projected/eeac7687-6578-41aa-99da-c576d6162d9e-kube-api-access-97gjn\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.366979 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeac7687-6578-41aa-99da-c576d6162d9e-apiservice-cert\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.367064 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eeac7687-6578-41aa-99da-c576d6162d9e-webhook-cert\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.367146 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97gjn\" (UniqueName: \"kubernetes.io/projected/eeac7687-6578-41aa-99da-c576d6162d9e-kube-api-access-97gjn\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.372899 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/eeac7687-6578-41aa-99da-c576d6162d9e-webhook-cert\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.379805 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/eeac7687-6578-41aa-99da-c576d6162d9e-apiservice-cert\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.385100 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97gjn\" (UniqueName: \"kubernetes.io/projected/eeac7687-6578-41aa-99da-c576d6162d9e-kube-api-access-97gjn\") pod \"keystone-operator-controller-manager-59fbfdbcd7-cvvvx\" (UID: \"eeac7687-6578-41aa-99da-c576d6162d9e\") " pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.491845 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.713645 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"]
Nov 25 09:45:37 crc kubenswrapper[4734]: I1125 09:45:37.946473 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" event={"ID":"eeac7687-6578-41aa-99da-c576d6162d9e","Type":"ContainerStarted","Data":"67bbeeb4553b3279c926dc38ee3545d8ee93900c7143e39e6ed877717ec666ed"}
Nov 25 09:45:44 crc kubenswrapper[4734]: I1125 09:45:44.003417 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" event={"ID":"eeac7687-6578-41aa-99da-c576d6162d9e","Type":"ContainerStarted","Data":"437746ac5415afbb4539bd1637931458dd0a4102e3630423816351a3ff378f85"}
Nov 25 09:45:44 crc kubenswrapper[4734]: I1125 09:45:44.003910 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 09:45:44 crc kubenswrapper[4734]: I1125 09:45:44.020129 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" podStartSLOduration=1.8500156460000001 podStartE2EDuration="7.020115673s" podCreationTimestamp="2025-11-25 09:45:37 +0000 UTC" firstStartedPulling="2025-11-25 09:45:37.721828974 +0000 UTC m=+1060.532290968" lastFinishedPulling="2025-11-25 09:45:42.891929001 +0000 UTC m=+1065.702390995" observedRunningTime="2025-11-25 09:45:44.018748874 +0000 UTC m=+1066.829210888" watchObservedRunningTime="2025-11-25 09:45:44.020115673 +0000 UTC m=+1066.830577667"
Nov 25 09:45:45 crc kubenswrapper[4734]: I1125 09:45:45.009409 4734 generic.go:334] "Generic (PLEG): container finished" podID="36037636-1752-434b-92b7-091efbfd9005" containerID="87f515c06f302640c39cb66e613672691572d7957c557a2b01a87da115716d25" exitCode=0
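The pod_startup_latency_tracker record above encodes a fixed arithmetic relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling), since the kubelet's startup SLI excludes pull time. A minimal sketch that recomputes both values from the timestamps logged above (hard-coding them here is purely for illustration; the parse layout is Go's default time.Time formatting):

    package main

    import (
    	"fmt"
    	"time"
    )

    // mustParse handles the "2025-11-25 09:45:37 +0000 UTC" form seen in the log.
    func mustParse(s string) time.Time {
    	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	created := mustParse("2025-11-25 09:45:37 +0000 UTC")                 // podCreationTimestamp
    	firstPull := mustParse("2025-11-25 09:45:37.721828974 +0000 UTC")     // firstStartedPulling
    	lastPull := mustParse("2025-11-25 09:45:42.891929001 +0000 UTC")      // lastFinishedPulling
    	running := mustParse("2025-11-25 09:45:44.020115673 +0000 UTC")       // watchObservedRunningTime

    	e2e := running.Sub(created)          // podStartE2EDuration
    	slo := e2e - lastPull.Sub(firstPull) // end-to-end minus image-pull time
    	fmt.Println(e2e, slo)
    }

Running it prints 7.020115673s and 1.850015646s, matching the logged podStartE2EDuration and podStartSLOduration exactly.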
Nov 25 09:45:45 crc kubenswrapper[4734]: I1125 09:45:45.010379 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/rabbitmq-server-0" event={"ID":"36037636-1752-434b-92b7-091efbfd9005","Type":"ContainerDied","Data":"87f515c06f302640c39cb66e613672691572d7957c557a2b01a87da115716d25"}
Nov 25 09:45:46 crc kubenswrapper[4734]: I1125 09:45:46.021197 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/rabbitmq-server-0" event={"ID":"36037636-1752-434b-92b7-091efbfd9005","Type":"ContainerStarted","Data":"801567ce604c218f4fb7d77ecde339c13f05a9772641981592b7e3a8b03dfd80"}
Nov 25 09:45:46 crc kubenswrapper[4734]: I1125 09:45:46.021711 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/rabbitmq-server-0"
Nov 25 09:45:46 crc kubenswrapper[4734]: I1125 09:45:46.042836 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/rabbitmq-server-0" podStartSLOduration=36.040120577 podStartE2EDuration="43.04281477s" podCreationTimestamp="2025-11-25 09:45:03 +0000 UTC" firstStartedPulling="2025-11-25 09:45:05.279303403 +0000 UTC m=+1028.089765397" lastFinishedPulling="2025-11-25 09:45:12.281997596 +0000 UTC m=+1035.092459590" observedRunningTime="2025-11-25 09:45:46.039616849 +0000 UTC m=+1068.850078843" watchObservedRunningTime="2025-11-25 09:45:46.04281477 +0000 UTC m=+1068.853276764"
Nov 25 09:45:57 crc kubenswrapper[4734]: I1125 09:45:57.496259 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.480152 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"]
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.481080 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.483074 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-db-secret"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.490975 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"]
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.580533 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-create-sndrq"]
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.581386 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-sndrq" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.596326 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-sndrq"] Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.652616 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjbfz\" (UniqueName: \"kubernetes.io/projected/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-kube-api-access-fjbfz\") pod \"keystone-2100-account-create-update-vxxmd\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.652912 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d678534-ab47-45c3-9dea-e54fd17540d0-operator-scripts\") pod \"keystone-db-create-sndrq\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " pod="keystone-kuttl-tests/keystone-db-create-sndrq" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.653191 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9tnk\" (UniqueName: \"kubernetes.io/projected/4d678534-ab47-45c3-9dea-e54fd17540d0-kube-api-access-d9tnk\") pod \"keystone-db-create-sndrq\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " pod="keystone-kuttl-tests/keystone-db-create-sndrq" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.653275 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-operator-scripts\") pod \"keystone-2100-account-create-update-vxxmd\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.754071 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-operator-scripts\") pod \"keystone-2100-account-create-update-vxxmd\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.754186 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjbfz\" (UniqueName: \"kubernetes.io/projected/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-kube-api-access-fjbfz\") pod \"keystone-2100-account-create-update-vxxmd\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.754225 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d678534-ab47-45c3-9dea-e54fd17540d0-operator-scripts\") pod \"keystone-db-create-sndrq\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " pod="keystone-kuttl-tests/keystone-db-create-sndrq" Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.754256 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9tnk\" (UniqueName: \"kubernetes.io/projected/4d678534-ab47-45c3-9dea-e54fd17540d0-kube-api-access-d9tnk\") pod 
\"keystone-db-create-sndrq\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " pod="keystone-kuttl-tests/keystone-db-create-sndrq"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.755361 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-operator-scripts\") pod \"keystone-2100-account-create-update-vxxmd\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.755802 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d678534-ab47-45c3-9dea-e54fd17540d0-operator-scripts\") pod \"keystone-db-create-sndrq\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " pod="keystone-kuttl-tests/keystone-db-create-sndrq"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.773441 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9tnk\" (UniqueName: \"kubernetes.io/projected/4d678534-ab47-45c3-9dea-e54fd17540d0-kube-api-access-d9tnk\") pod \"keystone-db-create-sndrq\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " pod="keystone-kuttl-tests/keystone-db-create-sndrq"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.775879 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjbfz\" (UniqueName: \"kubernetes.io/projected/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-kube-api-access-fjbfz\") pod \"keystone-2100-account-create-update-vxxmd\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.799666 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"
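Every volume in the records above moves through the same three reconciler phases, in order: operationExecutor.VerifyControllerAttachedVolume (reconciler_common.go:245), operationExecutor.MountVolume (reconciler_common.go:218), and MountVolume.SetUp succeeded (operation_generator.go:637). A rough audit of that ordering can key each record on its UniqueName; a sketch, assuming the log is fed one record per line on stdin:

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"regexp"
    	"strings"
    )

    // Hypothetical checker: flags any volume whose mount records appear out of
    // reconciler order. The \" escapes match the klog quoting seen in this log.
    func main() {
    	uniq := regexp.MustCompile(`UniqueName: \\"(.*?)\\"`)
    	phases := []string{
    		"VerifyControllerAttachedVolume started",
    		"MountVolume started",
    		"MountVolume.SetUp succeeded",
    	}
    	seen := map[string]int{} // volume UniqueName -> last phase index observed
    	sc := bufio.NewScanner(os.Stdin)
    	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // records can be long
    	for sc.Scan() {
    		line := sc.Text()
    		m := uniq.FindStringSubmatch(line)
    		if m == nil {
    			continue
    		}
    		for i, p := range phases {
    			if strings.Contains(line, p) {
    				if i < seen[m[1]] {
    					fmt.Printf("out-of-order %q for %s\n", p, m[1])
    				}
    				seen[m[1]] = i
    			}
    		}
    	}
    }

Against the records above, no volume regresses: each one is verified, then mounted, then reported as set up.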
Nov 25 09:45:58 crc kubenswrapper[4734]: I1125 09:45:58.898692 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-sndrq"
Nov 25 09:45:59 crc kubenswrapper[4734]: I1125 09:45:59.257193 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"]
Nov 25 09:45:59 crc kubenswrapper[4734]: I1125 09:45:59.310541 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-sndrq"]
Nov 25 09:45:59 crc kubenswrapper[4734]: W1125 09:45:59.324287 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d678534_ab47_45c3_9dea_e54fd17540d0.slice/crio-740e248f451de67b31399472929db8f595fa4ed69c439b002229733da7fb170b WatchSource:0}: Error finding container 740e248f451de67b31399472929db8f595fa4ed69c439b002229733da7fb170b: Status 404 returned error can't find the container with id 740e248f451de67b31399472929db8f595fa4ed69c439b002229733da7fb170b
Nov 25 09:46:00 crc kubenswrapper[4734]: I1125 09:46:00.111896 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" event={"ID":"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da","Type":"ContainerStarted","Data":"54a8e85b7b1989eb0e3dab80ef757564e03cb8990e886cd86658220c888eb628"}
Nov 25 09:46:00 crc kubenswrapper[4734]: I1125 09:46:00.112304 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" event={"ID":"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da","Type":"ContainerStarted","Data":"91bbc4aa02df3c45d807be08bf48845dcd91970dd267d6ba36ae077e59d4570b"}
Nov 25 09:46:00 crc kubenswrapper[4734]: I1125 09:46:00.113452 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-sndrq" event={"ID":"4d678534-ab47-45c3-9dea-e54fd17540d0","Type":"ContainerStarted","Data":"f8da931fe99629a6788f6c1487b603e2759679a850fe64d151a0a47b8b5913d6"}
Nov 25 09:46:00 crc kubenswrapper[4734]: I1125 09:46:00.113490 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-sndrq" event={"ID":"4d678534-ab47-45c3-9dea-e54fd17540d0","Type":"ContainerStarted","Data":"740e248f451de67b31399472929db8f595fa4ed69c439b002229733da7fb170b"}
Nov 25 09:46:00 crc kubenswrapper[4734]: I1125 09:46:00.130677 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" podStartSLOduration=2.130656767 podStartE2EDuration="2.130656767s" podCreationTimestamp="2025-11-25 09:45:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.129223456 +0000 UTC m=+1082.939685470" watchObservedRunningTime="2025-11-25 09:46:00.130656767 +0000 UTC m=+1082.941118761"
Nov 25 09:46:00 crc kubenswrapper[4734]: I1125 09:46:00.156262 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-create-sndrq" podStartSLOduration=2.156245068 podStartE2EDuration="2.156245068s" podCreationTimestamp="2025-11-25 09:45:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.152660876 +0000 UTC m=+1082.963122870" watchObservedRunningTime="2025-11-25 09:46:00.156245068 +0000 UTC m=+1082.966707052"
Nov 25 09:46:04 crc kubenswrapper[4734]: I1125 09:46:04.854182 4734 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/rabbitmq-server-0" Nov 25 09:46:06 crc kubenswrapper[4734]: I1125 09:46:06.151989 4734 generic.go:334] "Generic (PLEG): container finished" podID="0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" containerID="54a8e85b7b1989eb0e3dab80ef757564e03cb8990e886cd86658220c888eb628" exitCode=0 Nov 25 09:46:06 crc kubenswrapper[4734]: I1125 09:46:06.152065 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" event={"ID":"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da","Type":"ContainerDied","Data":"54a8e85b7b1989eb0e3dab80ef757564e03cb8990e886cd86658220c888eb628"} Nov 25 09:46:06 crc kubenswrapper[4734]: I1125 09:46:06.153870 4734 generic.go:334] "Generic (PLEG): container finished" podID="4d678534-ab47-45c3-9dea-e54fd17540d0" containerID="f8da931fe99629a6788f6c1487b603e2759679a850fe64d151a0a47b8b5913d6" exitCode=0 Nov 25 09:46:06 crc kubenswrapper[4734]: I1125 09:46:06.153902 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-sndrq" event={"ID":"4d678534-ab47-45c3-9dea-e54fd17540d0","Type":"ContainerDied","Data":"f8da931fe99629a6788f6c1487b603e2759679a850fe64d151a0a47b8b5913d6"} Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.518683 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-sndrq" Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.523908 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.672551 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-operator-scripts\") pod \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.672629 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjbfz\" (UniqueName: \"kubernetes.io/projected/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-kube-api-access-fjbfz\") pod \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\" (UID: \"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da\") " Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.672716 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d678534-ab47-45c3-9dea-e54fd17540d0-operator-scripts\") pod \"4d678534-ab47-45c3-9dea-e54fd17540d0\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.672771 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9tnk\" (UniqueName: \"kubernetes.io/projected/4d678534-ab47-45c3-9dea-e54fd17540d0-kube-api-access-d9tnk\") pod \"4d678534-ab47-45c3-9dea-e54fd17540d0\" (UID: \"4d678534-ab47-45c3-9dea-e54fd17540d0\") " Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.673389 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" (UID: "0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.673483 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d678534-ab47-45c3-9dea-e54fd17540d0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4d678534-ab47-45c3-9dea-e54fd17540d0" (UID: "4d678534-ab47-45c3-9dea-e54fd17540d0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.679304 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d678534-ab47-45c3-9dea-e54fd17540d0-kube-api-access-d9tnk" (OuterVolumeSpecName: "kube-api-access-d9tnk") pod "4d678534-ab47-45c3-9dea-e54fd17540d0" (UID: "4d678534-ab47-45c3-9dea-e54fd17540d0"). InnerVolumeSpecName "kube-api-access-d9tnk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.679357 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-kube-api-access-fjbfz" (OuterVolumeSpecName: "kube-api-access-fjbfz") pod "0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" (UID: "0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da"). InnerVolumeSpecName "kube-api-access-fjbfz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.773825 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9tnk\" (UniqueName: \"kubernetes.io/projected/4d678534-ab47-45c3-9dea-e54fd17540d0-kube-api-access-d9tnk\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.773872 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.773885 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjbfz\" (UniqueName: \"kubernetes.io/projected/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da-kube-api-access-fjbfz\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:07 crc kubenswrapper[4734]: I1125 09:46:07.773897 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d678534-ab47-45c3-9dea-e54fd17540d0-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:08 crc kubenswrapper[4734]: I1125 09:46:08.167220 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-sndrq" event={"ID":"4d678534-ab47-45c3-9dea-e54fd17540d0","Type":"ContainerDied","Data":"740e248f451de67b31399472929db8f595fa4ed69c439b002229733da7fb170b"}
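The PLEG records here carry a structured event payload (event={"ID":...,"Type":...,"Data":...}) that is valid JSON once sliced out of the line, so pairing each pod UID's ContainerStarted and ContainerDied events needs only string surgery plus encoding/json. A sketch under that assumption, again reading one record per line from stdin:

    package main

    import (
    	"bufio"
    	"encoding/json"
    	"fmt"
    	"os"
    	"strings"
    )

    // plegEvent mirrors the payload logged by kubelet.go:2453.
    type plegEvent struct {
    	ID   string // pod UID
    	Type string // ContainerStarted, ContainerDied, ...
    	Data string // container or sandbox ID
    }

    func main() {
    	sc := bufio.NewScanner(os.Stdin)
    	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
    	for sc.Scan() {
    		line := sc.Text()
    		i := strings.Index(line, "event={")
    		if i < 0 {
    			continue
    		}
    		payload := line[i+len("event="):]
    		if j := strings.Index(payload, "}"); j >= 0 {
    			payload = payload[:j+1] // the payload has no nested braces
    		}
    		var ev plegEvent
    		if err := json.Unmarshal([]byte(payload), &ev); err != nil {
    			continue
    		}
    		fmt.Printf("%s %s %s\n", ev.ID, ev.Type, ev.Data)
    	}
    }

For the job pods above this yields the expected pairing: each container ID gets exactly one ContainerStarted and one ContainerDied, followed by a ContainerDied for the sandbox ID once the pod sandbox itself is removed.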
Nov 25 09:46:08 crc kubenswrapper[4734]: I1125 09:46:08.167246 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-sndrq"
Nov 25 09:46:08 crc kubenswrapper[4734]: I1125 09:46:08.167311 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="740e248f451de67b31399472929db8f595fa4ed69c439b002229733da7fb170b"
Nov 25 09:46:08 crc kubenswrapper[4734]: I1125 09:46:08.168424 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd" event={"ID":"0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da","Type":"ContainerDied","Data":"91bbc4aa02df3c45d807be08bf48845dcd91970dd267d6ba36ae077e59d4570b"}
Nov 25 09:46:08 crc kubenswrapper[4734]: I1125 09:46:08.168455 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91bbc4aa02df3c45d807be08bf48845dcd91970dd267d6ba36ae077e59d4570b"
Nov 25 09:46:08 crc kubenswrapper[4734]: I1125 09:46:08.168501 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"
Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.063406 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-hlcmd"]
Nov 25 09:46:09 crc kubenswrapper[4734]: E1125 09:46:09.063692 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" containerName="mariadb-account-create-update"
Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.063710 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" containerName="mariadb-account-create-update"
Nov 25 09:46:09 crc kubenswrapper[4734]: E1125 09:46:09.063734 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d678534-ab47-45c3-9dea-e54fd17540d0" containerName="mariadb-database-create"
Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.063740 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d678534-ab47-45c3-9dea-e54fd17540d0" containerName="mariadb-database-create"
Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.063855 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" containerName="mariadb-account-create-update"
Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.063874 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d678534-ab47-45c3-9dea-e54fd17540d0" containerName="mariadb-database-create"
Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.064418 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.067035 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.067121 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.067215 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-kqw8f" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.067413 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.074602 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-hlcmd"] Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.196216 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5szb\" (UniqueName: \"kubernetes.io/projected/3f9a1a65-2a28-404e-8964-008e8d7db51e-kube-api-access-q5szb\") pod \"keystone-db-sync-hlcmd\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.196273 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f9a1a65-2a28-404e-8964-008e8d7db51e-config-data\") pod \"keystone-db-sync-hlcmd\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.297542 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5szb\" (UniqueName: \"kubernetes.io/projected/3f9a1a65-2a28-404e-8964-008e8d7db51e-kube-api-access-q5szb\") pod \"keystone-db-sync-hlcmd\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.297590 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f9a1a65-2a28-404e-8964-008e8d7db51e-config-data\") pod \"keystone-db-sync-hlcmd\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.302891 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f9a1a65-2a28-404e-8964-008e8d7db51e-config-data\") pod \"keystone-db-sync-hlcmd\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.314608 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5szb\" (UniqueName: \"kubernetes.io/projected/3f9a1a65-2a28-404e-8964-008e8d7db51e-kube-api-access-q5szb\") pod \"keystone-db-sync-hlcmd\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.379659 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:09 crc kubenswrapper[4734]: I1125 09:46:09.832739 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-hlcmd"] Nov 25 09:46:10 crc kubenswrapper[4734]: I1125 09:46:10.183712 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" event={"ID":"3f9a1a65-2a28-404e-8964-008e8d7db51e","Type":"ContainerStarted","Data":"536f46a3dd4a9a14ea4520e6f0499d5259224fb4b66ace909cd0884bcb61b899"} Nov 25 09:46:17 crc kubenswrapper[4734]: I1125 09:46:17.240214 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" event={"ID":"3f9a1a65-2a28-404e-8964-008e8d7db51e","Type":"ContainerStarted","Data":"0263c217aa4af48ba1112906424c64eef94a608411c74505798f73a5e8676727"} Nov 25 09:46:17 crc kubenswrapper[4734]: I1125 09:46:17.255110 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" podStartSLOduration=1.595430811 podStartE2EDuration="8.255077587s" podCreationTimestamp="2025-11-25 09:46:09 +0000 UTC" firstStartedPulling="2025-11-25 09:46:09.841761061 +0000 UTC m=+1092.652223055" lastFinishedPulling="2025-11-25 09:46:16.501407827 +0000 UTC m=+1099.311869831" observedRunningTime="2025-11-25 09:46:17.251044881 +0000 UTC m=+1100.061506875" watchObservedRunningTime="2025-11-25 09:46:17.255077587 +0000 UTC m=+1100.065539581" Nov 25 09:46:21 crc kubenswrapper[4734]: I1125 09:46:21.268684 4734 generic.go:334] "Generic (PLEG): container finished" podID="3f9a1a65-2a28-404e-8964-008e8d7db51e" containerID="0263c217aa4af48ba1112906424c64eef94a608411c74505798f73a5e8676727" exitCode=0 Nov 25 09:46:21 crc kubenswrapper[4734]: I1125 09:46:21.268796 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" event={"ID":"3f9a1a65-2a28-404e-8964-008e8d7db51e","Type":"ContainerDied","Data":"0263c217aa4af48ba1112906424c64eef94a608411c74505798f73a5e8676727"} Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.618553 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.792474 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f9a1a65-2a28-404e-8964-008e8d7db51e-config-data\") pod \"3f9a1a65-2a28-404e-8964-008e8d7db51e\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.792681 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5szb\" (UniqueName: \"kubernetes.io/projected/3f9a1a65-2a28-404e-8964-008e8d7db51e-kube-api-access-q5szb\") pod \"3f9a1a65-2a28-404e-8964-008e8d7db51e\" (UID: \"3f9a1a65-2a28-404e-8964-008e8d7db51e\") " Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.806166 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f9a1a65-2a28-404e-8964-008e8d7db51e-kube-api-access-q5szb" (OuterVolumeSpecName: "kube-api-access-q5szb") pod "3f9a1a65-2a28-404e-8964-008e8d7db51e" (UID: "3f9a1a65-2a28-404e-8964-008e8d7db51e"). InnerVolumeSpecName "kube-api-access-q5szb". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.826040 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f9a1a65-2a28-404e-8964-008e8d7db51e-config-data" (OuterVolumeSpecName: "config-data") pod "3f9a1a65-2a28-404e-8964-008e8d7db51e" (UID: "3f9a1a65-2a28-404e-8964-008e8d7db51e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.894997 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5szb\" (UniqueName: \"kubernetes.io/projected/3f9a1a65-2a28-404e-8964-008e8d7db51e-kube-api-access-q5szb\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:22 crc kubenswrapper[4734]: I1125 09:46:22.895049 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f9a1a65-2a28-404e-8964-008e8d7db51e-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.291596 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd" event={"ID":"3f9a1a65-2a28-404e-8964-008e8d7db51e","Type":"ContainerDied","Data":"536f46a3dd4a9a14ea4520e6f0499d5259224fb4b66ace909cd0884bcb61b899"}
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.291645 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-hlcmd"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.291660 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="536f46a3dd4a9a14ea4520e6f0499d5259224fb4b66ace909cd0884bcb61b899"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.504126 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-xvp4p"]
Nov 25 09:46:23 crc kubenswrapper[4734]: E1125 09:46:23.504474 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f9a1a65-2a28-404e-8964-008e8d7db51e" containerName="keystone-db-sync"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.504493 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f9a1a65-2a28-404e-8964-008e8d7db51e" containerName="keystone-db-sync"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.504636 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f9a1a65-2a28-404e-8964-008e8d7db51e" containerName="keystone-db-sync"
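The paired E/I RemoveStaleState lines recurring above are routine housekeeping rather than failures: when admitting a new pod, the CPU and memory managers prune per-container state left behind by pods that have already exited (here the completed keystone-db-sync job), and cpu_manager.go happens to log that pruning at error verbosity. The underlying pattern is a set difference between tracked assignments and currently-active containers; an illustrative sketch of that pattern, not the kubelet's actual code:

    package main

    import "fmt"

    // removeStale drops per-container assignments whose containers are no
    // longer active, the way RemoveStaleState prunes CPU/memory state.
    func removeStale(assignments map[string][]int, active map[string]bool) {
    	for container := range assignments {
    		if !active[container] {
    			fmt.Printf("removing stale state for %q\n", container)
    			delete(assignments, container) // deleting during range is safe in Go
    		}
    	}
    }

    func main() {
    	assignments := map[string][]int{
    		"keystone-db-sync": {0, 1}, // exited job container; state left behind
    		"keystone-api":     {2, 3},
    	}
    	active := map[string]bool{"keystone-api": true}
    	removeStale(assignments, active)
    	fmt.Println(assignments)
    }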
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.505160 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.509833 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.510026 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"osp-secret"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.510240 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.510355 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.511411 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-kqw8f"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.522610 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-xvp4p"]
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.605348 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-scripts\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.605418 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-fernet-keys\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.605516 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-config-data\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.605582 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtrr9\" (UniqueName: \"kubernetes.io/projected/f8ac9888-cdcd-4d41-bc95-8943f5cce007-kube-api-access-jtrr9\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.605629 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-credential-keys\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.707610 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-credential-keys\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p"
Nov 25 09:46:23 
crc kubenswrapper[4734]: I1125 09:46:23.707720 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-scripts\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.707754 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-fernet-keys\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.707787 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-config-data\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.707858 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtrr9\" (UniqueName: \"kubernetes.io/projected/f8ac9888-cdcd-4d41-bc95-8943f5cce007-kube-api-access-jtrr9\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.713727 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-config-data\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.714803 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-credential-keys\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.719886 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-scripts\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.724358 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-fernet-keys\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.732261 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtrr9\" (UniqueName: \"kubernetes.io/projected/f8ac9888-cdcd-4d41-bc95-8943f5cce007-kube-api-access-jtrr9\") pod \"keystone-bootstrap-xvp4p\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:23 crc kubenswrapper[4734]: I1125 09:46:23.820181 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:24 crc kubenswrapper[4734]: I1125 09:46:24.010680 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-xvp4p"] Nov 25 09:46:24 crc kubenswrapper[4734]: I1125 09:46:24.299617 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" event={"ID":"f8ac9888-cdcd-4d41-bc95-8943f5cce007","Type":"ContainerStarted","Data":"d6868ecb85c915efcdd01fb9921bc21552a1d97274931871e14ddf6afa7ffd63"} Nov 25 09:46:24 crc kubenswrapper[4734]: I1125 09:46:24.299662 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" event={"ID":"f8ac9888-cdcd-4d41-bc95-8943f5cce007","Type":"ContainerStarted","Data":"6c9b778f8f58725c5150bebe56888dfb72b17390995a1b3fa577135fa08cd90b"} Nov 25 09:46:24 crc kubenswrapper[4734]: I1125 09:46:24.321400 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" podStartSLOduration=1.321377775 podStartE2EDuration="1.321377775s" podCreationTimestamp="2025-11-25 09:46:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:24.315575259 +0000 UTC m=+1107.126037273" watchObservedRunningTime="2025-11-25 09:46:24.321377775 +0000 UTC m=+1107.131839769" Nov 25 09:46:27 crc kubenswrapper[4734]: I1125 09:46:27.318325 4734 generic.go:334] "Generic (PLEG): container finished" podID="f8ac9888-cdcd-4d41-bc95-8943f5cce007" containerID="d6868ecb85c915efcdd01fb9921bc21552a1d97274931871e14ddf6afa7ffd63" exitCode=0 Nov 25 09:46:27 crc kubenswrapper[4734]: I1125 09:46:27.318364 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" event={"ID":"f8ac9888-cdcd-4d41-bc95-8943f5cce007","Type":"ContainerDied","Data":"d6868ecb85c915efcdd01fb9921bc21552a1d97274931871e14ddf6afa7ffd63"} Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.628689 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.776329 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-credential-keys\") pod \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.776396 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtrr9\" (UniqueName: \"kubernetes.io/projected/f8ac9888-cdcd-4d41-bc95-8943f5cce007-kube-api-access-jtrr9\") pod \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.776464 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-scripts\") pod \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.776506 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-config-data\") pod \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.776545 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-fernet-keys\") pod \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\" (UID: \"f8ac9888-cdcd-4d41-bc95-8943f5cce007\") " Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.783381 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ac9888-cdcd-4d41-bc95-8943f5cce007-kube-api-access-jtrr9" (OuterVolumeSpecName: "kube-api-access-jtrr9") pod "f8ac9888-cdcd-4d41-bc95-8943f5cce007" (UID: "f8ac9888-cdcd-4d41-bc95-8943f5cce007"). InnerVolumeSpecName "kube-api-access-jtrr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.784122 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-scripts" (OuterVolumeSpecName: "scripts") pod "f8ac9888-cdcd-4d41-bc95-8943f5cce007" (UID: "f8ac9888-cdcd-4d41-bc95-8943f5cce007"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.784319 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "f8ac9888-cdcd-4d41-bc95-8943f5cce007" (UID: "f8ac9888-cdcd-4d41-bc95-8943f5cce007"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.784693 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f8ac9888-cdcd-4d41-bc95-8943f5cce007" (UID: "f8ac9888-cdcd-4d41-bc95-8943f5cce007"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.800261 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-config-data" (OuterVolumeSpecName: "config-data") pod "f8ac9888-cdcd-4d41-bc95-8943f5cce007" (UID: "f8ac9888-cdcd-4d41-bc95-8943f5cce007"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.878521 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.878566 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtrr9\" (UniqueName: \"kubernetes.io/projected/f8ac9888-cdcd-4d41-bc95-8943f5cce007-kube-api-access-jtrr9\") on node \"crc\" DevicePath \"\"" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.878580 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.878591 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:46:28 crc kubenswrapper[4734]: I1125 09:46:28.878602 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f8ac9888-cdcd-4d41-bc95-8943f5cce007-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.330726 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" event={"ID":"f8ac9888-cdcd-4d41-bc95-8943f5cce007","Type":"ContainerDied","Data":"6c9b778f8f58725c5150bebe56888dfb72b17390995a1b3fa577135fa08cd90b"} Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.330774 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c9b778f8f58725c5150bebe56888dfb72b17390995a1b3fa577135fa08cd90b" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.330777 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-xvp4p" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.526194 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9"] Nov 25 09:46:29 crc kubenswrapper[4734]: E1125 09:46:29.527415 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ac9888-cdcd-4d41-bc95-8943f5cce007" containerName="keystone-bootstrap" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.527454 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ac9888-cdcd-4d41-bc95-8943f5cce007" containerName="keystone-bootstrap" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.527862 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ac9888-cdcd-4d41-bc95-8943f5cce007" containerName="keystone-bootstrap" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.528614 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.534274 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.534347 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-kqw8f" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.535773 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.535928 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.559985 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9"] Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.690689 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-scripts\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.690757 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-config-data\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.690929 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-credential-keys\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.691002 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-fernet-keys\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.691142 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cqkb\" (UniqueName: \"kubernetes.io/projected/fa91f143-fe56-4b5b-829d-4a2365ee55a2-kube-api-access-6cqkb\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.793185 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cqkb\" (UniqueName: \"kubernetes.io/projected/fa91f143-fe56-4b5b-829d-4a2365ee55a2-kube-api-access-6cqkb\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.793600 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-scripts\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.793643 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-config-data\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.793671 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-credential-keys\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.793689 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-fernet-keys\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.799667 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-credential-keys\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.799785 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-fernet-keys\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.800769 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-config-data\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.801075 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-scripts\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.816031 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cqkb\" (UniqueName: \"kubernetes.io/projected/fa91f143-fe56-4b5b-829d-4a2365ee55a2-kube-api-access-6cqkb\") pod \"keystone-6fb9f67d95-z2gh9\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:29 crc kubenswrapper[4734]: I1125 09:46:29.861864 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:46:30 crc kubenswrapper[4734]: I1125 09:46:30.321244 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9"] Nov 25 09:46:30 crc kubenswrapper[4734]: I1125 09:46:30.340701 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" event={"ID":"fa91f143-fe56-4b5b-829d-4a2365ee55a2","Type":"ContainerStarted","Data":"d02bcf81d9faf3a14f504748cffd31b6e2c0d9b42e024536c57750c0ce678b13"} Nov 25 09:46:31 crc kubenswrapper[4734]: I1125 09:46:31.348591 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" event={"ID":"fa91f143-fe56-4b5b-829d-4a2365ee55a2","Type":"ContainerStarted","Data":"d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1"} Nov 25 09:46:31 crc kubenswrapper[4734]: I1125 09:46:31.348893 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:47:01 crc kubenswrapper[4734]: I1125 09:47:01.409364 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:47:01 crc kubenswrapper[4734]: I1125 09:47:01.428933 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" podStartSLOduration=32.428910897 podStartE2EDuration="32.428910897s" podCreationTimestamp="2025-11-25 09:46:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:31.365732686 +0000 UTC m=+1114.176194680" watchObservedRunningTime="2025-11-25 09:47:01.428910897 +0000 UTC m=+1144.239372901" Nov 25 09:47:02 crc kubenswrapper[4734]: E1125 09:47:02.837225 4734 log.go:32] "Failed when writing line to log file" err="http2: stream closed" path="/var/log/pods/keystone-kuttl-tests_keystone-6fb9f67d95-z2gh9_fa91f143-fe56-4b5b-829d-4a2365ee55a2/keystone-api/0.log" line={} Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.255952 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-6965668b5b-nkgb2"] Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.257167 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.270743 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6965668b5b-nkgb2"] Nov 25 09:47:03 crc kubenswrapper[4734]: E1125 09:47:03.321344 4734 log.go:32] "Failed when writing line to log file" err="http2: stream closed" path="/var/log/pods/keystone-kuttl-tests_keystone-6fb9f67d95-z2gh9_fa91f143-fe56-4b5b-829d-4a2365ee55a2/keystone-api/0.log" line={} Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.401828 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.401912 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.401939 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-257p2\" (UniqueName: \"kubernetes.io/projected/3650918b-1ce5-4ee2-803a-066d200c67be-kube-api-access-257p2\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.401966 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.402042 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.503280 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.503367 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.503390 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-257p2\" (UniqueName: 
\"kubernetes.io/projected/3650918b-1ce5-4ee2-803a-066d200c67be-kube-api-access-257p2\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.503416 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.503478 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.511104 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.511274 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.511756 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.513023 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.528404 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-257p2\" (UniqueName: \"kubernetes.io/projected/3650918b-1ce5-4ee2-803a-066d200c67be-kube-api-access-257p2\") pod \"keystone-6965668b5b-nkgb2\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.597986 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:03 crc kubenswrapper[4734]: I1125 09:47:03.995147 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6965668b5b-nkgb2"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.575476 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" event={"ID":"3650918b-1ce5-4ee2-803a-066d200c67be","Type":"ContainerStarted","Data":"b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6"} Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.575807 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.575822 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" event={"ID":"3650918b-1ce5-4ee2-803a-066d200c67be","Type":"ContainerStarted","Data":"eb203aaa3b08119cd2e9131c443618d71998f5d728093b778c0bfaa3a450ff48"} Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.594242 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" podStartSLOduration=1.5942217969999999 podStartE2EDuration="1.594221797s" podCreationTimestamp="2025-11-25 09:47:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:47:04.591382286 +0000 UTC m=+1147.401844300" watchObservedRunningTime="2025-11-25 09:47:04.594221797 +0000 UTC m=+1147.404683801" Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.692925 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-xvp4p"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.699793 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-xvp4p"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.708858 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-hlcmd"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.715152 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-hlcmd"] Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721347 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721387 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721437 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:05.221405552 +0000 UTC m=+1148.031867546 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone-scripts" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721465 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721511 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:05.221481694 +0000 UTC m=+1148.031943688 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone-config-data" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721536 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:05.221525446 +0000 UTC m=+1148.031987430 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721421 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 09:47:04 crc kubenswrapper[4734]: E1125 09:47:04.721574 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:05.221568827 +0000 UTC m=+1148.032030821 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone" not found Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.724378 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6965668b5b-nkgb2"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.730174 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.730904 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" podUID="fa91f143-fe56-4b5b-829d-4a2365ee55a2" containerName="keystone-api" containerID="cri-o://d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1" gracePeriod=30 Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.767861 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone2100-account-delete-blqct"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.768791 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.777415 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone2100-account-delete-blqct"] Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.924327 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05d3db47-b909-4369-a9c4-6c47dae3190e-operator-scripts\") pod \"keystone2100-account-delete-blqct\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:04 crc kubenswrapper[4734]: I1125 09:47:04.924420 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77bcn\" (UniqueName: \"kubernetes.io/projected/05d3db47-b909-4369-a9c4-6c47dae3190e-kube-api-access-77bcn\") pod \"keystone2100-account-delete-blqct\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.025666 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05d3db47-b909-4369-a9c4-6c47dae3190e-operator-scripts\") pod \"keystone2100-account-delete-blqct\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.025743 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77bcn\" (UniqueName: \"kubernetes.io/projected/05d3db47-b909-4369-a9c4-6c47dae3190e-kube-api-access-77bcn\") pod \"keystone2100-account-delete-blqct\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.026581 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05d3db47-b909-4369-a9c4-6c47dae3190e-operator-scripts\") pod \"keystone2100-account-delete-blqct\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.044491 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77bcn\" (UniqueName: \"kubernetes.io/projected/05d3db47-b909-4369-a9c4-6c47dae3190e-kube-api-access-77bcn\") pod \"keystone2100-account-delete-blqct\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.092423 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.229878 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.230466 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. 
No retries permitted until 2025-11-25 09:47:06.230438499 +0000 UTC m=+1149.040900483 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone-config-data" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.229918 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.230006 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.230602 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:06.230576323 +0000 UTC m=+1149.041038317 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone-scripts" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.230660 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:06.230637925 +0000 UTC m=+1149.041100089 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.230027 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 09:47:05 crc kubenswrapper[4734]: E1125 09:47:05.230718 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:06.230704507 +0000 UTC m=+1149.041166561 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone" not found Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.490165 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone2100-account-delete-blqct"] Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.582230 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" event={"ID":"05d3db47-b909-4369-a9c4-6c47dae3190e","Type":"ContainerStarted","Data":"630b5f507f0560fbb0d2af35587806e1845560785a749ce227baaa1eec7db976"} Nov 25 09:47:05 crc kubenswrapper[4734]: I1125 09:47:05.582567 4734 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" secret="" err="secret \"keystone-keystone-dockercfg-kqw8f\" not found" Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.244848 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.245489 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:08.245459398 +0000 UTC m=+1151.055921392 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.244859 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.245012 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.245614 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:08.245590691 +0000 UTC m=+1151.056052675 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.245025 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.245630 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:08.245624662 +0000 UTC m=+1151.056086656 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone-scripts" not found Nov 25 09:47:06 crc kubenswrapper[4734]: E1125 09:47:06.245650 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data podName:3650918b-1ce5-4ee2-803a-066d200c67be nodeName:}" failed. No retries permitted until 2025-11-25 09:47:08.245637483 +0000 UTC m=+1151.056099477 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data") pod "keystone-6965668b5b-nkgb2" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be") : secret "keystone-config-data" not found Nov 25 09:47:06 crc kubenswrapper[4734]: I1125 09:47:06.256731 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f9a1a65-2a28-404e-8964-008e8d7db51e" path="/var/lib/kubelet/pods/3f9a1a65-2a28-404e-8964-008e8d7db51e/volumes" Nov 25 09:47:06 crc kubenswrapper[4734]: I1125 09:47:06.257728 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ac9888-cdcd-4d41-bc95-8943f5cce007" path="/var/lib/kubelet/pods/f8ac9888-cdcd-4d41-bc95-8943f5cce007/volumes" Nov 25 09:47:06 crc kubenswrapper[4734]: I1125 09:47:06.589582 4734 generic.go:334] "Generic (PLEG): container finished" podID="05d3db47-b909-4369-a9c4-6c47dae3190e" containerID="fff87419dfee11c5a00b2c0638ea0726276ba41cded2019a5a72362b55c89169" exitCode=0 Nov 25 09:47:06 crc kubenswrapper[4734]: I1125 09:47:06.589687 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" event={"ID":"05d3db47-b909-4369-a9c4-6c47dae3190e","Type":"ContainerDied","Data":"fff87419dfee11c5a00b2c0638ea0726276ba41cded2019a5a72362b55c89169"} Nov 25 09:47:06 crc kubenswrapper[4734]: I1125 09:47:06.589824 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" podUID="3650918b-1ce5-4ee2-803a-066d200c67be" containerName="keystone-api" containerID="cri-o://b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6" gracePeriod=30 Nov 25 09:47:06 crc kubenswrapper[4734]: I1125 09:47:06.967508 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.159263 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts\") pod \"3650918b-1ce5-4ee2-803a-066d200c67be\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.159438 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data\") pod \"3650918b-1ce5-4ee2-803a-066d200c67be\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.160293 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys\") pod \"3650918b-1ce5-4ee2-803a-066d200c67be\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.160341 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-257p2\" (UniqueName: \"kubernetes.io/projected/3650918b-1ce5-4ee2-803a-066d200c67be-kube-api-access-257p2\") pod \"3650918b-1ce5-4ee2-803a-066d200c67be\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.160472 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys\") pod \"3650918b-1ce5-4ee2-803a-066d200c67be\" (UID: \"3650918b-1ce5-4ee2-803a-066d200c67be\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.166298 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3650918b-1ce5-4ee2-803a-066d200c67be" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.166289 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3650918b-1ce5-4ee2-803a-066d200c67be" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.166306 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3650918b-1ce5-4ee2-803a-066d200c67be-kube-api-access-257p2" (OuterVolumeSpecName: "kube-api-access-257p2") pod "3650918b-1ce5-4ee2-803a-066d200c67be" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be"). InnerVolumeSpecName "kube-api-access-257p2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.167323 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts" (OuterVolumeSpecName: "scripts") pod "3650918b-1ce5-4ee2-803a-066d200c67be" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.180863 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data" (OuterVolumeSpecName: "config-data") pod "3650918b-1ce5-4ee2-803a-066d200c67be" (UID: "3650918b-1ce5-4ee2-803a-066d200c67be"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.263010 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.263058 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.263072 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.263103 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-257p2\" (UniqueName: \"kubernetes.io/projected/3650918b-1ce5-4ee2-803a-066d200c67be-kube-api-access-257p2\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.263118 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3650918b-1ce5-4ee2-803a-066d200c67be-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.597665 4734 generic.go:334] "Generic (PLEG): container finished" podID="3650918b-1ce5-4ee2-803a-066d200c67be" containerID="b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6" exitCode=0 Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.597705 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.597724 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" event={"ID":"3650918b-1ce5-4ee2-803a-066d200c67be","Type":"ContainerDied","Data":"b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6"} Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.597787 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6965668b5b-nkgb2" event={"ID":"3650918b-1ce5-4ee2-803a-066d200c67be","Type":"ContainerDied","Data":"eb203aaa3b08119cd2e9131c443618d71998f5d728093b778c0bfaa3a450ff48"} Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.597818 4734 scope.go:117] "RemoveContainer" containerID="b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.631039 4734 scope.go:117] "RemoveContainer" containerID="b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6" Nov 25 09:47:07 crc kubenswrapper[4734]: E1125 09:47:07.631892 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6\": container with ID starting with b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6 not found: ID does not exist" containerID="b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.631945 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6"} err="failed to get container status \"b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6\": rpc error: code = NotFound desc = could not find container \"b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6\": container with ID starting with b3ef2a0cbe9797661dbd2e1a754a7d7ccfa58bd3b82021182cca248f1561b4b6 not found: ID does not exist" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.634591 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6965668b5b-nkgb2"] Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.638558 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-6965668b5b-nkgb2"] Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.864497 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.975829 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77bcn\" (UniqueName: \"kubernetes.io/projected/05d3db47-b909-4369-a9c4-6c47dae3190e-kube-api-access-77bcn\") pod \"05d3db47-b909-4369-a9c4-6c47dae3190e\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.976273 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05d3db47-b909-4369-a9c4-6c47dae3190e-operator-scripts\") pod \"05d3db47-b909-4369-a9c4-6c47dae3190e\" (UID: \"05d3db47-b909-4369-a9c4-6c47dae3190e\") " Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.976999 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05d3db47-b909-4369-a9c4-6c47dae3190e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "05d3db47-b909-4369-a9c4-6c47dae3190e" (UID: "05d3db47-b909-4369-a9c4-6c47dae3190e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:47:07 crc kubenswrapper[4734]: I1125 09:47:07.980896 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05d3db47-b909-4369-a9c4-6c47dae3190e-kube-api-access-77bcn" (OuterVolumeSpecName: "kube-api-access-77bcn") pod "05d3db47-b909-4369-a9c4-6c47dae3190e" (UID: "05d3db47-b909-4369-a9c4-6c47dae3190e"). InnerVolumeSpecName "kube-api-access-77bcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.078346 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05d3db47-b909-4369-a9c4-6c47dae3190e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.078412 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77bcn\" (UniqueName: \"kubernetes.io/projected/05d3db47-b909-4369-a9c4-6c47dae3190e-kube-api-access-77bcn\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.158752 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.254853 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3650918b-1ce5-4ee2-803a-066d200c67be" path="/var/lib/kubelet/pods/3650918b-1ce5-4ee2-803a-066d200c67be/volumes" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.281565 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-scripts\") pod \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.281634 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-credential-keys\") pod \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.281935 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-fernet-keys\") pod \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.282047 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-config-data\") pod \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.282154 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cqkb\" (UniqueName: \"kubernetes.io/projected/fa91f143-fe56-4b5b-829d-4a2365ee55a2-kube-api-access-6cqkb\") pod \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\" (UID: \"fa91f143-fe56-4b5b-829d-4a2365ee55a2\") " Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.284909 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fa91f143-fe56-4b5b-829d-4a2365ee55a2" (UID: "fa91f143-fe56-4b5b-829d-4a2365ee55a2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.285186 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-scripts" (OuterVolumeSpecName: "scripts") pod "fa91f143-fe56-4b5b-829d-4a2365ee55a2" (UID: "fa91f143-fe56-4b5b-829d-4a2365ee55a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.285740 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa91f143-fe56-4b5b-829d-4a2365ee55a2-kube-api-access-6cqkb" (OuterVolumeSpecName: "kube-api-access-6cqkb") pod "fa91f143-fe56-4b5b-829d-4a2365ee55a2" (UID: "fa91f143-fe56-4b5b-829d-4a2365ee55a2"). InnerVolumeSpecName "kube-api-access-6cqkb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.286222 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "fa91f143-fe56-4b5b-829d-4a2365ee55a2" (UID: "fa91f143-fe56-4b5b-829d-4a2365ee55a2"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.300526 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-config-data" (OuterVolumeSpecName: "config-data") pod "fa91f143-fe56-4b5b-829d-4a2365ee55a2" (UID: "fa91f143-fe56-4b5b-829d-4a2365ee55a2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.383414 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.383461 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cqkb\" (UniqueName: \"kubernetes.io/projected/fa91f143-fe56-4b5b-829d-4a2365ee55a2-kube-api-access-6cqkb\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.383471 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.383480 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.383489 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa91f143-fe56-4b5b-829d-4a2365ee55a2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.607897 4734 generic.go:334] "Generic (PLEG): container finished" podID="fa91f143-fe56-4b5b-829d-4a2365ee55a2" containerID="d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1" exitCode=0 Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.607989 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" event={"ID":"fa91f143-fe56-4b5b-829d-4a2365ee55a2","Type":"ContainerDied","Data":"d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1"} Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.608029 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" event={"ID":"fa91f143-fe56-4b5b-829d-4a2365ee55a2","Type":"ContainerDied","Data":"d02bcf81d9faf3a14f504748cffd31b6e2c0d9b42e024536c57750c0ce678b13"} Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.608051 4734 scope.go:117] "RemoveContainer" containerID="d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.608419 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.610897 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" event={"ID":"05d3db47-b909-4369-a9c4-6c47dae3190e","Type":"ContainerDied","Data":"630b5f507f0560fbb0d2af35587806e1845560785a749ce227baaa1eec7db976"} Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.610957 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="630b5f507f0560fbb0d2af35587806e1845560785a749ce227baaa1eec7db976" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.610986 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone2100-account-delete-blqct" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.630026 4734 scope.go:117] "RemoveContainer" containerID="d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1" Nov 25 09:47:08 crc kubenswrapper[4734]: E1125 09:47:08.630635 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1\": container with ID starting with d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1 not found: ID does not exist" containerID="d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.630747 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1"} err="failed to get container status \"d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1\": rpc error: code = NotFound desc = could not find container \"d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1\": container with ID starting with d56a0ab78cb3df88762be19b0bb35f61ca175e724afd5774e21a0142f775f2c1 not found: ID does not exist" Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.643871 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9"] Nov 25 09:47:08 crc kubenswrapper[4734]: I1125 09:47:08.648393 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-6fb9f67d95-z2gh9"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.790451 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-sndrq"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.794904 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-sndrq"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.807498 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone2100-account-delete-blqct"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.814179 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone2100-account-delete-blqct"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.819532 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.824483 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-2100-account-create-update-vxxmd"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884188 
4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkqft"] Nov 25 09:47:09 crc kubenswrapper[4734]: E1125 09:47:09.884475 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3650918b-1ce5-4ee2-803a-066d200c67be" containerName="keystone-api" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884493 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="3650918b-1ce5-4ee2-803a-066d200c67be" containerName="keystone-api" Nov 25 09:47:09 crc kubenswrapper[4734]: E1125 09:47:09.884507 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa91f143-fe56-4b5b-829d-4a2365ee55a2" containerName="keystone-api" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884515 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa91f143-fe56-4b5b-829d-4a2365ee55a2" containerName="keystone-api" Nov 25 09:47:09 crc kubenswrapper[4734]: E1125 09:47:09.884538 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05d3db47-b909-4369-a9c4-6c47dae3190e" containerName="mariadb-account-delete" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884546 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="05d3db47-b909-4369-a9c4-6c47dae3190e" containerName="mariadb-account-delete" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884670 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="05d3db47-b909-4369-a9c4-6c47dae3190e" containerName="mariadb-account-delete" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884681 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa91f143-fe56-4b5b-829d-4a2365ee55a2" containerName="keystone-api" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.884691 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="3650918b-1ce5-4ee2-803a-066d200c67be" containerName="keystone-api" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.885168 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.894270 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkqft"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.990189 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8"] Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.990895 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:09 crc kubenswrapper[4734]: I1125 09:47:09.992875 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-db-secret" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.002039 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8"] Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.006177 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa64dc-7838-4fef-9a31-4033be9913df-operator-scripts\") pod \"keystone-db-create-mkqft\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") " pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.006262 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbsk7\" (UniqueName: \"kubernetes.io/projected/27fa64dc-7838-4fef-9a31-4033be9913df-kube-api-access-pbsk7\") pod \"keystone-db-create-mkqft\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") " pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.107215 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa64dc-7838-4fef-9a31-4033be9913df-operator-scripts\") pod \"keystone-db-create-mkqft\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") " pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.107271 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb37b695-6599-4b41-b675-ec6ffa470d35-operator-scripts\") pod \"keystone-fb32-account-create-update-m8jp8\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") " pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.107304 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6x9g\" (UniqueName: \"kubernetes.io/projected/eb37b695-6599-4b41-b675-ec6ffa470d35-kube-api-access-z6x9g\") pod \"keystone-fb32-account-create-update-m8jp8\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") " pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.107330 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbsk7\" (UniqueName: \"kubernetes.io/projected/27fa64dc-7838-4fef-9a31-4033be9913df-kube-api-access-pbsk7\") pod \"keystone-db-create-mkqft\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") " pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.107997 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa64dc-7838-4fef-9a31-4033be9913df-operator-scripts\") pod \"keystone-db-create-mkqft\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") " pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.126269 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pbsk7\" (UniqueName: \"kubernetes.io/projected/27fa64dc-7838-4fef-9a31-4033be9913df-kube-api-access-pbsk7\") pod \"keystone-db-create-mkqft\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") " pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.204074 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.208795 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb37b695-6599-4b41-b675-ec6ffa470d35-operator-scripts\") pod \"keystone-fb32-account-create-update-m8jp8\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") " pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.208891 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6x9g\" (UniqueName: \"kubernetes.io/projected/eb37b695-6599-4b41-b675-ec6ffa470d35-kube-api-access-z6x9g\") pod \"keystone-fb32-account-create-update-m8jp8\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") " pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.209822 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb37b695-6599-4b41-b675-ec6ffa470d35-operator-scripts\") pod \"keystone-fb32-account-create-update-m8jp8\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") " pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.229770 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6x9g\" (UniqueName: \"kubernetes.io/projected/eb37b695-6599-4b41-b675-ec6ffa470d35-kube-api-access-z6x9g\") pod \"keystone-fb32-account-create-update-m8jp8\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") " pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.261006 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da" path="/var/lib/kubelet/pods/0169c781-3c9b-4bf0-ae20-2d6b1bc2a1da/volumes" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.261661 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05d3db47-b909-4369-a9c4-6c47dae3190e" path="/var/lib/kubelet/pods/05d3db47-b909-4369-a9c4-6c47dae3190e/volumes" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.262265 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d678534-ab47-45c3-9dea-e54fd17540d0" path="/var/lib/kubelet/pods/4d678534-ab47-45c3-9dea-e54fd17540d0/volumes" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.263486 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa91f143-fe56-4b5b-829d-4a2365ee55a2" path="/var/lib/kubelet/pods/fa91f143-fe56-4b5b-829d-4a2365ee55a2/volumes" Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.309019 4734 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.603026 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkqft"]
Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.626898 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-mkqft" event={"ID":"27fa64dc-7838-4fef-9a31-4033be9913df","Type":"ContainerStarted","Data":"5682adda46c212623bf42a07604e2a30e6d7c10477b37ef793cc1761d09b1810"}
Nov 25 09:47:10 crc kubenswrapper[4734]: I1125 09:47:10.713469 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8"]
Nov 25 09:47:10 crc kubenswrapper[4734]: W1125 09:47:10.717407 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb37b695_6599_4b41_b675_ec6ffa470d35.slice/crio-f8b97d8f1fe273b205b9c98179c1ff96a4d5079566e838b11cd7f6a3b2519ceb WatchSource:0}: Error finding container f8b97d8f1fe273b205b9c98179c1ff96a4d5079566e838b11cd7f6a3b2519ceb: Status 404 returned error can't find the container with id f8b97d8f1fe273b205b9c98179c1ff96a4d5079566e838b11cd7f6a3b2519ceb
Nov 25 09:47:11 crc kubenswrapper[4734]: I1125 09:47:11.639862 4734 generic.go:334] "Generic (PLEG): container finished" podID="27fa64dc-7838-4fef-9a31-4033be9913df" containerID="3891cdaa4db16c256383ab0bfc872b898a7ee09834bafdb7742863b0dfb4f716" exitCode=0
Nov 25 09:47:11 crc kubenswrapper[4734]: I1125 09:47:11.639941 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-mkqft" event={"ID":"27fa64dc-7838-4fef-9a31-4033be9913df","Type":"ContainerDied","Data":"3891cdaa4db16c256383ab0bfc872b898a7ee09834bafdb7742863b0dfb4f716"}
Nov 25 09:47:11 crc kubenswrapper[4734]: I1125 09:47:11.643218 4734 generic.go:334] "Generic (PLEG): container finished" podID="eb37b695-6599-4b41-b675-ec6ffa470d35" containerID="7c736635f48a846074173273ba88d79a572f8757627880aa0467ae73996891f5" exitCode=0
Nov 25 09:47:11 crc kubenswrapper[4734]: I1125 09:47:11.643257 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" event={"ID":"eb37b695-6599-4b41-b675-ec6ffa470d35","Type":"ContainerDied","Data":"7c736635f48a846074173273ba88d79a572f8757627880aa0467ae73996891f5"}
Nov 25 09:47:11 crc kubenswrapper[4734]: I1125 09:47:11.643284 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" event={"ID":"eb37b695-6599-4b41-b675-ec6ffa470d35","Type":"ContainerStarted","Data":"f8b97d8f1fe273b205b9c98179c1ff96a4d5079566e838b11cd7f6a3b2519ceb"}
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.005882 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8"
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.011026 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkqft"
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.147853 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6x9g\" (UniqueName: \"kubernetes.io/projected/eb37b695-6599-4b41-b675-ec6ffa470d35-kube-api-access-z6x9g\") pod \"eb37b695-6599-4b41-b675-ec6ffa470d35\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") "
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.148038 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa64dc-7838-4fef-9a31-4033be9913df-operator-scripts\") pod \"27fa64dc-7838-4fef-9a31-4033be9913df\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") "
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.148325 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbsk7\" (UniqueName: \"kubernetes.io/projected/27fa64dc-7838-4fef-9a31-4033be9913df-kube-api-access-pbsk7\") pod \"27fa64dc-7838-4fef-9a31-4033be9913df\" (UID: \"27fa64dc-7838-4fef-9a31-4033be9913df\") "
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.148402 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb37b695-6599-4b41-b675-ec6ffa470d35-operator-scripts\") pod \"eb37b695-6599-4b41-b675-ec6ffa470d35\" (UID: \"eb37b695-6599-4b41-b675-ec6ffa470d35\") "
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.148829 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27fa64dc-7838-4fef-9a31-4033be9913df-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27fa64dc-7838-4fef-9a31-4033be9913df" (UID: "27fa64dc-7838-4fef-9a31-4033be9913df"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.149241 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb37b695-6599-4b41-b675-ec6ffa470d35-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eb37b695-6599-4b41-b675-ec6ffa470d35" (UID: "eb37b695-6599-4b41-b675-ec6ffa470d35"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.155322 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27fa64dc-7838-4fef-9a31-4033be9913df-kube-api-access-pbsk7" (OuterVolumeSpecName: "kube-api-access-pbsk7") pod "27fa64dc-7838-4fef-9a31-4033be9913df" (UID: "27fa64dc-7838-4fef-9a31-4033be9913df"). InnerVolumeSpecName "kube-api-access-pbsk7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.155408 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb37b695-6599-4b41-b675-ec6ffa470d35-kube-api-access-z6x9g" (OuterVolumeSpecName: "kube-api-access-z6x9g") pod "eb37b695-6599-4b41-b675-ec6ffa470d35" (UID: "eb37b695-6599-4b41-b675-ec6ffa470d35"). InnerVolumeSpecName "kube-api-access-z6x9g". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.249574 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27fa64dc-7838-4fef-9a31-4033be9913df-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.249605 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbsk7\" (UniqueName: \"kubernetes.io/projected/27fa64dc-7838-4fef-9a31-4033be9913df-kube-api-access-pbsk7\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.249616 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb37b695-6599-4b41-b675-ec6ffa470d35-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.249624 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6x9g\" (UniqueName: \"kubernetes.io/projected/eb37b695-6599-4b41-b675-ec6ffa470d35-kube-api-access-z6x9g\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.655518 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-mkqft" event={"ID":"27fa64dc-7838-4fef-9a31-4033be9913df","Type":"ContainerDied","Data":"5682adda46c212623bf42a07604e2a30e6d7c10477b37ef793cc1761d09b1810"} Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.655554 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5682adda46c212623bf42a07604e2a30e6d7c10477b37ef793cc1761d09b1810" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.655537 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkqft" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.657606 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" event={"ID":"eb37b695-6599-4b41-b675-ec6ffa470d35","Type":"ContainerDied","Data":"f8b97d8f1fe273b205b9c98179c1ff96a4d5079566e838b11cd7f6a3b2519ceb"} Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.657633 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8b97d8f1fe273b205b9c98179c1ff96a4d5079566e838b11cd7f6a3b2519ceb" Nov 25 09:47:13 crc kubenswrapper[4734]: I1125 09:47:13.657640 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.545325 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-rc62x"] Nov 25 09:47:15 crc kubenswrapper[4734]: E1125 09:47:15.545616 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27fa64dc-7838-4fef-9a31-4033be9913df" containerName="mariadb-database-create" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.545635 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="27fa64dc-7838-4fef-9a31-4033be9913df" containerName="mariadb-database-create" Nov 25 09:47:15 crc kubenswrapper[4734]: E1125 09:47:15.545666 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb37b695-6599-4b41-b675-ec6ffa470d35" containerName="mariadb-account-create-update" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.545675 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb37b695-6599-4b41-b675-ec6ffa470d35" containerName="mariadb-account-create-update" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.545825 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="27fa64dc-7838-4fef-9a31-4033be9913df" containerName="mariadb-database-create" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.545848 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb37b695-6599-4b41-b675-ec6ffa470d35" containerName="mariadb-account-create-update" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.546375 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.552832 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.552832 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.553366 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.553493 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-7bqhb" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.557789 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-rc62x"] Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.682343 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dff36379-a812-41c5-ad89-4d0e9065bfe5-config-data\") pod \"keystone-db-sync-rc62x\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.682520 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qg7q\" (UniqueName: \"kubernetes.io/projected/dff36379-a812-41c5-ad89-4d0e9065bfe5-kube-api-access-6qg7q\") pod \"keystone-db-sync-rc62x\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.783548 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-6qg7q\" (UniqueName: \"kubernetes.io/projected/dff36379-a812-41c5-ad89-4d0e9065bfe5-kube-api-access-6qg7q\") pod \"keystone-db-sync-rc62x\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.783636 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dff36379-a812-41c5-ad89-4d0e9065bfe5-config-data\") pod \"keystone-db-sync-rc62x\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.789112 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dff36379-a812-41c5-ad89-4d0e9065bfe5-config-data\") pod \"keystone-db-sync-rc62x\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.801704 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qg7q\" (UniqueName: \"kubernetes.io/projected/dff36379-a812-41c5-ad89-4d0e9065bfe5-kube-api-access-6qg7q\") pod \"keystone-db-sync-rc62x\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:15 crc kubenswrapper[4734]: I1125 09:47:15.865831 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:16 crc kubenswrapper[4734]: I1125 09:47:16.271953 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-rc62x"] Nov 25 09:47:16 crc kubenswrapper[4734]: I1125 09:47:16.678320 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" event={"ID":"dff36379-a812-41c5-ad89-4d0e9065bfe5","Type":"ContainerStarted","Data":"9e8e2ad70f582808b2b274f5f4fd1381480ec8a2fef9215dd160f163dd827350"} Nov 25 09:47:16 crc kubenswrapper[4734]: I1125 09:47:16.678656 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" event={"ID":"dff36379-a812-41c5-ad89-4d0e9065bfe5","Type":"ContainerStarted","Data":"d7d9856f5d3c4957a68ea16386d6c968377444597afbbd6177aae2a2a18c7ce3"} Nov 25 09:47:16 crc kubenswrapper[4734]: I1125 09:47:16.695334 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" podStartSLOduration=1.695317271 podStartE2EDuration="1.695317271s" podCreationTimestamp="2025-11-25 09:47:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:47:16.691672217 +0000 UTC m=+1159.502134211" watchObservedRunningTime="2025-11-25 09:47:16.695317271 +0000 UTC m=+1159.505779265" Nov 25 09:47:18 crc kubenswrapper[4734]: I1125 09:47:18.695691 4734 generic.go:334] "Generic (PLEG): container finished" podID="dff36379-a812-41c5-ad89-4d0e9065bfe5" containerID="9e8e2ad70f582808b2b274f5f4fd1381480ec8a2fef9215dd160f163dd827350" exitCode=0 Nov 25 09:47:18 crc kubenswrapper[4734]: I1125 09:47:18.695759 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" event={"ID":"dff36379-a812-41c5-ad89-4d0e9065bfe5","Type":"ContainerDied","Data":"9e8e2ad70f582808b2b274f5f4fd1381480ec8a2fef9215dd160f163dd827350"} Nov 25 
09:47:19 crc kubenswrapper[4734]: I1125 09:47:19.953241 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.147006 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qg7q\" (UniqueName: \"kubernetes.io/projected/dff36379-a812-41c5-ad89-4d0e9065bfe5-kube-api-access-6qg7q\") pod \"dff36379-a812-41c5-ad89-4d0e9065bfe5\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.147080 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dff36379-a812-41c5-ad89-4d0e9065bfe5-config-data\") pod \"dff36379-a812-41c5-ad89-4d0e9065bfe5\" (UID: \"dff36379-a812-41c5-ad89-4d0e9065bfe5\") " Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.153047 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff36379-a812-41c5-ad89-4d0e9065bfe5-kube-api-access-6qg7q" (OuterVolumeSpecName: "kube-api-access-6qg7q") pod "dff36379-a812-41c5-ad89-4d0e9065bfe5" (UID: "dff36379-a812-41c5-ad89-4d0e9065bfe5"). InnerVolumeSpecName "kube-api-access-6qg7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.186008 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dff36379-a812-41c5-ad89-4d0e9065bfe5-config-data" (OuterVolumeSpecName: "config-data") pod "dff36379-a812-41c5-ad89-4d0e9065bfe5" (UID: "dff36379-a812-41c5-ad89-4d0e9065bfe5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.249584 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qg7q\" (UniqueName: \"kubernetes.io/projected/dff36379-a812-41c5-ad89-4d0e9065bfe5-kube-api-access-6qg7q\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.249648 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dff36379-a812-41c5-ad89-4d0e9065bfe5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.695915 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.695986 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.713389 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" event={"ID":"dff36379-a812-41c5-ad89-4d0e9065bfe5","Type":"ContainerDied","Data":"d7d9856f5d3c4957a68ea16386d6c968377444597afbbd6177aae2a2a18c7ce3"} Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.713441 4734 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="d7d9856f5d3c4957a68ea16386d6c968377444597afbbd6177aae2a2a18c7ce3" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.713477 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-rc62x" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.891775 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-pdk5l"] Nov 25 09:47:20 crc kubenswrapper[4734]: E1125 09:47:20.892319 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff36379-a812-41c5-ad89-4d0e9065bfe5" containerName="keystone-db-sync" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.892380 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff36379-a812-41c5-ad89-4d0e9065bfe5" containerName="keystone-db-sync" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.893205 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="dff36379-a812-41c5-ad89-4d0e9065bfe5" containerName="keystone-db-sync" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.893880 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.898484 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.902320 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"osp-secret" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.902755 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.902980 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-7bqhb" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.903221 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:47:20 crc kubenswrapper[4734]: I1125 09:47:20.904879 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-pdk5l"] Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.064399 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-credential-keys\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.064469 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-config-data\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.064519 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-scripts\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.064533 4734 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-fernet-keys\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.064558 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgkpx\" (UniqueName: \"kubernetes.io/projected/d956d9d8-006f-4404-9917-0178e765a9dc-kube-api-access-pgkpx\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.166275 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-config-data\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.166356 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-scripts\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.166377 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-fernet-keys\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.166401 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgkpx\" (UniqueName: \"kubernetes.io/projected/d956d9d8-006f-4404-9917-0178e765a9dc-kube-api-access-pgkpx\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.166443 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-credential-keys\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.170310 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-scripts\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.170715 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-credential-keys\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.171361 4734 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-fernet-keys\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.181043 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-config-data\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.182901 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgkpx\" (UniqueName: \"kubernetes.io/projected/d956d9d8-006f-4404-9917-0178e765a9dc-kube-api-access-pgkpx\") pod \"keystone-bootstrap-pdk5l\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.216396 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.649220 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-pdk5l"] Nov 25 09:47:21 crc kubenswrapper[4734]: W1125 09:47:21.661005 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd956d9d8_006f_4404_9917_0178e765a9dc.slice/crio-16f79f7bfb2d03701be0836a35bc63b8435e60f957777e072c297cfa67a33631 WatchSource:0}: Error finding container 16f79f7bfb2d03701be0836a35bc63b8435e60f957777e072c297cfa67a33631: Status 404 returned error can't find the container with id 16f79f7bfb2d03701be0836a35bc63b8435e60f957777e072c297cfa67a33631 Nov 25 09:47:21 crc kubenswrapper[4734]: I1125 09:47:21.723856 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" event={"ID":"d956d9d8-006f-4404-9917-0178e765a9dc","Type":"ContainerStarted","Data":"16f79f7bfb2d03701be0836a35bc63b8435e60f957777e072c297cfa67a33631"} Nov 25 09:47:22 crc kubenswrapper[4734]: I1125 09:47:22.731722 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" event={"ID":"d956d9d8-006f-4404-9917-0178e765a9dc","Type":"ContainerStarted","Data":"8b9467783608c58691954bfe0960b0875d3d66282949bfeb3e7f6d7a96a5562e"} Nov 25 09:47:22 crc kubenswrapper[4734]: I1125 09:47:22.751206 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" podStartSLOduration=2.751182382 podStartE2EDuration="2.751182382s" podCreationTimestamp="2025-11-25 09:47:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:47:22.746531099 +0000 UTC m=+1165.556993093" watchObservedRunningTime="2025-11-25 09:47:22.751182382 +0000 UTC m=+1165.561644376" Nov 25 09:47:24 crc kubenswrapper[4734]: I1125 09:47:24.746689 4734 generic.go:334] "Generic (PLEG): container finished" podID="d956d9d8-006f-4404-9917-0178e765a9dc" containerID="8b9467783608c58691954bfe0960b0875d3d66282949bfeb3e7f6d7a96a5562e" exitCode=0 Nov 25 09:47:24 crc kubenswrapper[4734]: I1125 09:47:24.746792 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" event={"ID":"d956d9d8-006f-4404-9917-0178e765a9dc","Type":"ContainerDied","Data":"8b9467783608c58691954bfe0960b0875d3d66282949bfeb3e7f6d7a96a5562e"} Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.021411 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.144094 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-config-data\") pod \"d956d9d8-006f-4404-9917-0178e765a9dc\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.144172 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-fernet-keys\") pod \"d956d9d8-006f-4404-9917-0178e765a9dc\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.144212 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgkpx\" (UniqueName: \"kubernetes.io/projected/d956d9d8-006f-4404-9917-0178e765a9dc-kube-api-access-pgkpx\") pod \"d956d9d8-006f-4404-9917-0178e765a9dc\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.144268 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-credential-keys\") pod \"d956d9d8-006f-4404-9917-0178e765a9dc\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.144317 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-scripts\") pod \"d956d9d8-006f-4404-9917-0178e765a9dc\" (UID: \"d956d9d8-006f-4404-9917-0178e765a9dc\") " Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.149641 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-scripts" (OuterVolumeSpecName: "scripts") pod "d956d9d8-006f-4404-9917-0178e765a9dc" (UID: "d956d9d8-006f-4404-9917-0178e765a9dc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.149715 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d956d9d8-006f-4404-9917-0178e765a9dc" (UID: "d956d9d8-006f-4404-9917-0178e765a9dc"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.150120 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d956d9d8-006f-4404-9917-0178e765a9dc-kube-api-access-pgkpx" (OuterVolumeSpecName: "kube-api-access-pgkpx") pod "d956d9d8-006f-4404-9917-0178e765a9dc" (UID: "d956d9d8-006f-4404-9917-0178e765a9dc"). InnerVolumeSpecName "kube-api-access-pgkpx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.150869 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d956d9d8-006f-4404-9917-0178e765a9dc" (UID: "d956d9d8-006f-4404-9917-0178e765a9dc"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.166431 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-config-data" (OuterVolumeSpecName: "config-data") pod "d956d9d8-006f-4404-9917-0178e765a9dc" (UID: "d956d9d8-006f-4404-9917-0178e765a9dc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.245360 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.245397 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.245409 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgkpx\" (UniqueName: \"kubernetes.io/projected/d956d9d8-006f-4404-9917-0178e765a9dc-kube-api-access-pgkpx\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.245419 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.245429 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d956d9d8-006f-4404-9917-0178e765a9dc-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.761582 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" event={"ID":"d956d9d8-006f-4404-9917-0178e765a9dc","Type":"ContainerDied","Data":"16f79f7bfb2d03701be0836a35bc63b8435e60f957777e072c297cfa67a33631"} Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.761927 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16f79f7bfb2d03701be0836a35bc63b8435e60f957777e072c297cfa67a33631" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.761650 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-pdk5l" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.831358 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"] Nov 25 09:47:26 crc kubenswrapper[4734]: E1125 09:47:26.831672 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d956d9d8-006f-4404-9917-0178e765a9dc" containerName="keystone-bootstrap" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.831695 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="d956d9d8-006f-4404-9917-0178e765a9dc" containerName="keystone-bootstrap" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.831872 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="d956d9d8-006f-4404-9917-0178e765a9dc" containerName="keystone-bootstrap" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.832444 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.834624 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.834624 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-7bqhb" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.835279 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.836039 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.846016 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"] Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.854654 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-fernet-keys\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.854709 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-config-data\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.854737 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-scripts\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.854756 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wncql\" (UniqueName: \"kubernetes.io/projected/2bb041e1-7942-46d6-b64e-64c4de2cc86d-kube-api-access-wncql\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.955796 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-scripts\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.955842 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wncql\" (UniqueName: \"kubernetes.io/projected/2bb041e1-7942-46d6-b64e-64c4de2cc86d-kube-api-access-wncql\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.955898 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-credential-keys\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.955941 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-fernet-keys\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.955975 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-config-data\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.960106 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-scripts\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.960212 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-credential-keys\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.961620 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-config-data\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.963924 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-fernet-keys\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:26 crc kubenswrapper[4734]: I1125 09:47:26.975516 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wncql\" (UniqueName: \"kubernetes.io/projected/2bb041e1-7942-46d6-b64e-64c4de2cc86d-kube-api-access-wncql\") pod \"keystone-6d4fb875b7-dwh45\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:27 crc kubenswrapper[4734]: I1125 09:47:27.149879 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:27 crc kubenswrapper[4734]: I1125 09:47:27.542597 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"]
Nov 25 09:47:27 crc kubenswrapper[4734]: I1125 09:47:27.770125 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" event={"ID":"2bb041e1-7942-46d6-b64e-64c4de2cc86d","Type":"ContainerStarted","Data":"4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd"}
Nov 25 09:47:27 crc kubenswrapper[4734]: I1125 09:47:27.770174 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" event={"ID":"2bb041e1-7942-46d6-b64e-64c4de2cc86d","Type":"ContainerStarted","Data":"974c893d475e15716e4f2bcdf09759a8c16aec72ce9fb23094455e104fbd04ba"}
Nov 25 09:47:27 crc kubenswrapper[4734]: I1125 09:47:27.770280 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:47:27 crc kubenswrapper[4734]: I1125 09:47:27.788241 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" podStartSLOduration=1.788224015 podStartE2EDuration="1.788224015s" podCreationTimestamp="2025-11-25 09:47:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:47:27.786765733 +0000 UTC m=+1170.597227757" watchObservedRunningTime="2025-11-25 09:47:27.788224015 +0000 UTC m=+1170.598686009"
Nov 25 09:47:50 crc kubenswrapper[4734]: I1125 09:47:50.696413 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:47:50 crc kubenswrapper[4734]: I1125 09:47:50.697013 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:47:58 crc kubenswrapper[4734]: I1125 09:47:58.788797 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"
Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.117728 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"]
pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"] Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.118719 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.126304 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6"] Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.127148 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.136034 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"] Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.151660 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6"] Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.253770 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m54fk\" (UniqueName: \"kubernetes.io/projected/48c62c70-f3bb-4d8d-a535-181ea2f57c13-kube-api-access-m54fk\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254150 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-credential-keys\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254315 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-config-data\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254440 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-fernet-keys\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254755 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-config-data\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254793 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-credential-keys\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254830 4734 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-scripts\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254850 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-fernet-keys\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.254983 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6g2f9\" (UniqueName: \"kubernetes.io/projected/472c1b96-2dfa-4520-8f64-6040e4b4d7da-kube-api-access-6g2f9\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.255013 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-scripts\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.356662 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-config-data\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.356951 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-fernet-keys\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357101 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-credential-keys\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357193 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-config-data\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357270 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-scripts\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357354 4734 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-fernet-keys\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357471 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6g2f9\" (UniqueName: \"kubernetes.io/projected/472c1b96-2dfa-4520-8f64-6040e4b4d7da-kube-api-access-6g2f9\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357547 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-scripts\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357623 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m54fk\" (UniqueName: \"kubernetes.io/projected/48c62c70-f3bb-4d8d-a535-181ea2f57c13-kube-api-access-m54fk\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.357728 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-credential-keys\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.363723 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-scripts\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.364404 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-scripts\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.364723 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-fernet-keys\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.365231 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-credential-keys\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.365673 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-config-data\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.366837 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-fernet-keys\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.367930 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-credential-keys\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.376734 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-config-data\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.380691 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m54fk\" (UniqueName: \"kubernetes.io/projected/48c62c70-f3bb-4d8d-a535-181ea2f57c13-kube-api-access-m54fk\") pod \"keystone-6d4fb875b7-xqrb6\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.381576 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6g2f9\" (UniqueName: \"kubernetes.io/projected/472c1b96-2dfa-4520-8f64-6040e4b4d7da-kube-api-access-6g2f9\") pod \"keystone-6d4fb875b7-2njc6\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.440974 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.455815 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.715110 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"] Nov 25 09:48:00 crc kubenswrapper[4734]: I1125 09:48:00.974780 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6"] Nov 25 09:48:00 crc kubenswrapper[4734]: W1125 09:48:00.979524 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48c62c70_f3bb_4d8d_a535_181ea2f57c13.slice/crio-baf8aadf7c1eb84273aa441efc1bbb0334dd16f829fffb1a07aa64eb2094d78f WatchSource:0}: Error finding container baf8aadf7c1eb84273aa441efc1bbb0334dd16f829fffb1a07aa64eb2094d78f: Status 404 returned error can't find the container with id baf8aadf7c1eb84273aa441efc1bbb0334dd16f829fffb1a07aa64eb2094d78f Nov 25 09:48:01 crc kubenswrapper[4734]: I1125 09:48:01.029190 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" event={"ID":"48c62c70-f3bb-4d8d-a535-181ea2f57c13","Type":"ContainerStarted","Data":"baf8aadf7c1eb84273aa441efc1bbb0334dd16f829fffb1a07aa64eb2094d78f"} Nov 25 09:48:01 crc kubenswrapper[4734]: I1125 09:48:01.031507 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" event={"ID":"472c1b96-2dfa-4520-8f64-6040e4b4d7da","Type":"ContainerStarted","Data":"a9f8bcf771df93ca7648f71c1c9aace5eeb094093afdaaa5dff827630828fb2c"} Nov 25 09:48:01 crc kubenswrapper[4734]: I1125 09:48:01.031618 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" event={"ID":"472c1b96-2dfa-4520-8f64-6040e4b4d7da","Type":"ContainerStarted","Data":"4066ff7caded90a43fff4dc74f09c7dae132de27e8952ecad9c538443f2c27e4"} Nov 25 09:48:01 crc kubenswrapper[4734]: I1125 09:48:01.031946 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:01 crc kubenswrapper[4734]: I1125 09:48:01.050818 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" podStartSLOduration=1.050790911 podStartE2EDuration="1.050790911s" podCreationTimestamp="2025-11-25 09:48:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:01.046432037 +0000 UTC m=+1203.856894031" watchObservedRunningTime="2025-11-25 09:48:01.050790911 +0000 UTC m=+1203.861252905" Nov 25 09:48:02 crc kubenswrapper[4734]: I1125 09:48:02.041486 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" event={"ID":"48c62c70-f3bb-4d8d-a535-181ea2f57c13","Type":"ContainerStarted","Data":"071228e507be37b2d1cb909c8abafcc2d85713a829ad7afe2872e1244af8d624"} Nov 25 09:48:02 crc kubenswrapper[4734]: I1125 09:48:02.041908 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:02 crc kubenswrapper[4734]: I1125 09:48:02.067449 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" podStartSLOduration=2.067424796 podStartE2EDuration="2.067424796s" podCreationTimestamp="2025-11-25 09:48:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:02.060376725 +0000 UTC m=+1204.870838719" watchObservedRunningTime="2025-11-25 09:48:02.067424796 +0000 UTC m=+1204.877886790" Nov 25 09:48:20 crc kubenswrapper[4734]: I1125 09:48:20.695551 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:48:20 crc kubenswrapper[4734]: I1125 09:48:20.696111 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:48:20 crc kubenswrapper[4734]: I1125 09:48:20.696168 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:48:20 crc kubenswrapper[4734]: I1125 09:48:20.696859 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b897de4eab07f171dfe6b1c309559bbe3bc70bbec2b52f3e83fb0315718594f5"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:48:20 crc kubenswrapper[4734]: I1125 09:48:20.696916 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://b897de4eab07f171dfe6b1c309559bbe3bc70bbec2b52f3e83fb0315718594f5" gracePeriod=600 Nov 25 09:48:21 crc kubenswrapper[4734]: I1125 09:48:21.165714 4734 generic.go:334] "Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="b897de4eab07f171dfe6b1c309559bbe3bc70bbec2b52f3e83fb0315718594f5" exitCode=0 Nov 25 09:48:21 crc kubenswrapper[4734]: I1125 09:48:21.166119 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"b897de4eab07f171dfe6b1c309559bbe3bc70bbec2b52f3e83fb0315718594f5"} Nov 25 09:48:21 crc kubenswrapper[4734]: I1125 09:48:21.166236 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"90ed44f721fb21f7c9a46b3cc3ce395345dcb1bd87653d89f7c1d2cf3cd435fb"} Nov 25 09:48:21 crc kubenswrapper[4734]: I1125 09:48:21.166377 4734 scope.go:117] "RemoveContainer" containerID="3b8d398052ed40cef3b469389e339a6739d1c3bd141a5a0198cc0270e0a8927e" Nov 25 09:48:32 crc kubenswrapper[4734]: I1125 09:48:32.014144 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:32 crc kubenswrapper[4734]: I1125 09:48:32.025242 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:32 crc kubenswrapper[4734]: I1125 
09:48:32.993187 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"] Nov 25 09:48:32 crc kubenswrapper[4734]: I1125 09:48:32.993698 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" podUID="472c1b96-2dfa-4520-8f64-6040e4b4d7da" containerName="keystone-api" containerID="cri-o://a9f8bcf771df93ca7648f71c1c9aace5eeb094093afdaaa5dff827630828fb2c" gracePeriod=30 Nov 25 09:48:32 crc kubenswrapper[4734]: I1125 09:48:32.997991 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6"] Nov 25 09:48:32 crc kubenswrapper[4734]: I1125 09:48:32.998730 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" podUID="48c62c70-f3bb-4d8d-a535-181ea2f57c13" containerName="keystone-api" containerID="cri-o://071228e507be37b2d1cb909c8abafcc2d85713a829ad7afe2872e1244af8d624" gracePeriod=30 Nov 25 09:48:34 crc kubenswrapper[4734]: I1125 09:48:34.186616 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"] Nov 25 09:48:34 crc kubenswrapper[4734]: I1125 09:48:34.186898 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" podUID="2bb041e1-7942-46d6-b64e-64c4de2cc86d" containerName="keystone-api" containerID="cri-o://4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd" gracePeriod=30 Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.273984 4734 generic.go:334] "Generic (PLEG): container finished" podID="472c1b96-2dfa-4520-8f64-6040e4b4d7da" containerID="a9f8bcf771df93ca7648f71c1c9aace5eeb094093afdaaa5dff827630828fb2c" exitCode=0 Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.274160 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" event={"ID":"472c1b96-2dfa-4520-8f64-6040e4b4d7da","Type":"ContainerDied","Data":"a9f8bcf771df93ca7648f71c1c9aace5eeb094093afdaaa5dff827630828fb2c"} Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.277427 4734 generic.go:334] "Generic (PLEG): container finished" podID="48c62c70-f3bb-4d8d-a535-181ea2f57c13" containerID="071228e507be37b2d1cb909c8abafcc2d85713a829ad7afe2872e1244af8d624" exitCode=0 Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.277479 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" event={"ID":"48c62c70-f3bb-4d8d-a535-181ea2f57c13","Type":"ContainerDied","Data":"071228e507be37b2d1cb909c8abafcc2d85713a829ad7afe2872e1244af8d624"} Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.608721 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.614075 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710654 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g2f9\" (UniqueName: \"kubernetes.io/projected/472c1b96-2dfa-4520-8f64-6040e4b4d7da-kube-api-access-6g2f9\") pod \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710694 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-credential-keys\") pod \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710715 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-credential-keys\") pod \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710803 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-scripts\") pod \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710825 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-config-data\") pod \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710874 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-fernet-keys\") pod \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710901 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-fernet-keys\") pod \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710936 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-scripts\") pod \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\" (UID: \"472c1b96-2dfa-4520-8f64-6040e4b4d7da\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.710985 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-config-data\") pod \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\" (UID: \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.711004 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m54fk\" (UniqueName: \"kubernetes.io/projected/48c62c70-f3bb-4d8d-a535-181ea2f57c13-kube-api-access-m54fk\") pod \"48c62c70-f3bb-4d8d-a535-181ea2f57c13\" (UID: 
\"48c62c70-f3bb-4d8d-a535-181ea2f57c13\") " Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.717300 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "472c1b96-2dfa-4520-8f64-6040e4b4d7da" (UID: "472c1b96-2dfa-4520-8f64-6040e4b4d7da"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.717344 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48c62c70-f3bb-4d8d-a535-181ea2f57c13-kube-api-access-m54fk" (OuterVolumeSpecName: "kube-api-access-m54fk") pod "48c62c70-f3bb-4d8d-a535-181ea2f57c13" (UID: "48c62c70-f3bb-4d8d-a535-181ea2f57c13"). InnerVolumeSpecName "kube-api-access-m54fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.717398 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/472c1b96-2dfa-4520-8f64-6040e4b4d7da-kube-api-access-6g2f9" (OuterVolumeSpecName: "kube-api-access-6g2f9") pod "472c1b96-2dfa-4520-8f64-6040e4b4d7da" (UID: "472c1b96-2dfa-4520-8f64-6040e4b4d7da"). InnerVolumeSpecName "kube-api-access-6g2f9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.717409 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "48c62c70-f3bb-4d8d-a535-181ea2f57c13" (UID: "48c62c70-f3bb-4d8d-a535-181ea2f57c13"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.717431 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-scripts" (OuterVolumeSpecName: "scripts") pod "48c62c70-f3bb-4d8d-a535-181ea2f57c13" (UID: "48c62c70-f3bb-4d8d-a535-181ea2f57c13"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.718069 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "48c62c70-f3bb-4d8d-a535-181ea2f57c13" (UID: "48c62c70-f3bb-4d8d-a535-181ea2f57c13"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.719177 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-scripts" (OuterVolumeSpecName: "scripts") pod "472c1b96-2dfa-4520-8f64-6040e4b4d7da" (UID: "472c1b96-2dfa-4520-8f64-6040e4b4d7da"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.731715 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "472c1b96-2dfa-4520-8f64-6040e4b4d7da" (UID: "472c1b96-2dfa-4520-8f64-6040e4b4d7da"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.735670 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-config-data" (OuterVolumeSpecName: "config-data") pod "48c62c70-f3bb-4d8d-a535-181ea2f57c13" (UID: "48c62c70-f3bb-4d8d-a535-181ea2f57c13"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.737334 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-config-data" (OuterVolumeSpecName: "config-data") pod "472c1b96-2dfa-4520-8f64-6040e4b4d7da" (UID: "472c1b96-2dfa-4520-8f64-6040e4b4d7da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812749 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812801 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m54fk\" (UniqueName: \"kubernetes.io/projected/48c62c70-f3bb-4d8d-a535-181ea2f57c13-kube-api-access-m54fk\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812815 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812824 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g2f9\" (UniqueName: \"kubernetes.io/projected/472c1b96-2dfa-4520-8f64-6040e4b4d7da-kube-api-access-6g2f9\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812833 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812840 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812847 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812855 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812862 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/48c62c70-f3bb-4d8d-a535-181ea2f57c13-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:36 crc kubenswrapper[4734]: I1125 09:48:36.812870 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/472c1b96-2dfa-4520-8f64-6040e4b4d7da-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 
09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.285405 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.285459 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-2njc6" event={"ID":"472c1b96-2dfa-4520-8f64-6040e4b4d7da","Type":"ContainerDied","Data":"4066ff7caded90a43fff4dc74f09c7dae132de27e8952ecad9c538443f2c27e4"} Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.285540 4734 scope.go:117] "RemoveContainer" containerID="a9f8bcf771df93ca7648f71c1c9aace5eeb094093afdaaa5dff827630828fb2c" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.288565 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" event={"ID":"48c62c70-f3bb-4d8d-a535-181ea2f57c13","Type":"ContainerDied","Data":"baf8aadf7c1eb84273aa441efc1bbb0334dd16f829fffb1a07aa64eb2094d78f"} Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.288614 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.310992 4734 scope.go:117] "RemoveContainer" containerID="071228e507be37b2d1cb909c8abafcc2d85713a829ad7afe2872e1244af8d624" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.320548 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"] Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.330013 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-2njc6"] Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.337496 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6"] Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.341372 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-xqrb6"] Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.705278 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.835591 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-fernet-keys\") pod \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.835697 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-credential-keys\") pod \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.835719 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-config-data\") pod \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.835838 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wncql\" (UniqueName: \"kubernetes.io/projected/2bb041e1-7942-46d6-b64e-64c4de2cc86d-kube-api-access-wncql\") pod \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.835861 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-scripts\") pod \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\" (UID: \"2bb041e1-7942-46d6-b64e-64c4de2cc86d\") " Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.839564 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "2bb041e1-7942-46d6-b64e-64c4de2cc86d" (UID: "2bb041e1-7942-46d6-b64e-64c4de2cc86d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.841318 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "2bb041e1-7942-46d6-b64e-64c4de2cc86d" (UID: "2bb041e1-7942-46d6-b64e-64c4de2cc86d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.841332 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bb041e1-7942-46d6-b64e-64c4de2cc86d-kube-api-access-wncql" (OuterVolumeSpecName: "kube-api-access-wncql") pod "2bb041e1-7942-46d6-b64e-64c4de2cc86d" (UID: "2bb041e1-7942-46d6-b64e-64c4de2cc86d"). InnerVolumeSpecName "kube-api-access-wncql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.843260 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-scripts" (OuterVolumeSpecName: "scripts") pod "2bb041e1-7942-46d6-b64e-64c4de2cc86d" (UID: "2bb041e1-7942-46d6-b64e-64c4de2cc86d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.865062 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-config-data" (OuterVolumeSpecName: "config-data") pod "2bb041e1-7942-46d6-b64e-64c4de2cc86d" (UID: "2bb041e1-7942-46d6-b64e-64c4de2cc86d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.937365 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wncql\" (UniqueName: \"kubernetes.io/projected/2bb041e1-7942-46d6-b64e-64c4de2cc86d-kube-api-access-wncql\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.937775 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.937789 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.937821 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:37 crc kubenswrapper[4734]: I1125 09:48:37.937832 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bb041e1-7942-46d6-b64e-64c4de2cc86d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.263556 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="472c1b96-2dfa-4520-8f64-6040e4b4d7da" path="/var/lib/kubelet/pods/472c1b96-2dfa-4520-8f64-6040e4b4d7da/volumes" Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.264347 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48c62c70-f3bb-4d8d-a535-181ea2f57c13" path="/var/lib/kubelet/pods/48c62c70-f3bb-4d8d-a535-181ea2f57c13/volumes" Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.303754 4734 generic.go:334] "Generic (PLEG): container finished" podID="2bb041e1-7942-46d6-b64e-64c4de2cc86d" containerID="4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd" exitCode=0 Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.303825 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.303872 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" event={"ID":"2bb041e1-7942-46d6-b64e-64c4de2cc86d","Type":"ContainerDied","Data":"4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd"} Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.303933 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-6d4fb875b7-dwh45" event={"ID":"2bb041e1-7942-46d6-b64e-64c4de2cc86d","Type":"ContainerDied","Data":"974c893d475e15716e4f2bcdf09759a8c16aec72ce9fb23094455e104fbd04ba"} Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.303957 4734 scope.go:117] "RemoveContainer" containerID="4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd" Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.333251 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"] Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.335402 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-6d4fb875b7-dwh45"] Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.337536 4734 scope.go:117] "RemoveContainer" containerID="4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd" Nov 25 09:48:38 crc kubenswrapper[4734]: E1125 09:48:38.338143 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd\": container with ID starting with 4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd not found: ID does not exist" containerID="4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd" Nov 25 09:48:38 crc kubenswrapper[4734]: I1125 09:48:38.338207 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd"} err="failed to get container status \"4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd\": rpc error: code = NotFound desc = could not find container \"4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd\": container with ID starting with 4c8e58819e9ef112ef78cab9820dc7b610022ce760bd5967c364fd1f018ebcdd not found: ID does not exist" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.345067 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-rc62x"] Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.352411 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-rc62x"] Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.358478 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-pdk5l"] Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.363202 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-pdk5l"] Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.424918 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystonefb32-account-delete-bnkht"] Nov 25 09:48:39 crc kubenswrapper[4734]: E1125 09:48:39.425221 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48c62c70-f3bb-4d8d-a535-181ea2f57c13" containerName="keystone-api" Nov 25 09:48:39 
crc kubenswrapper[4734]: I1125 09:48:39.425243 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="48c62c70-f3bb-4d8d-a535-181ea2f57c13" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: E1125 09:48:39.425262 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="472c1b96-2dfa-4520-8f64-6040e4b4d7da" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.425270 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="472c1b96-2dfa-4520-8f64-6040e4b4d7da" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: E1125 09:48:39.425297 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bb041e1-7942-46d6-b64e-64c4de2cc86d" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.425307 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bb041e1-7942-46d6-b64e-64c4de2cc86d" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.425444 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bb041e1-7942-46d6-b64e-64c4de2cc86d" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.425460 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="472c1b96-2dfa-4520-8f64-6040e4b4d7da" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.425473 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="48c62c70-f3bb-4d8d-a535-181ea2f57c13" containerName="keystone-api" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.425987 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.434546 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystonefb32-account-delete-bnkht"] Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.558759 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cc2c7b9-8384-480a-b49c-81b028f55f56-operator-scripts\") pod \"keystonefb32-account-delete-bnkht\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.559207 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srtvc\" (UniqueName: \"kubernetes.io/projected/2cc2c7b9-8384-480a-b49c-81b028f55f56-kube-api-access-srtvc\") pod \"keystonefb32-account-delete-bnkht\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.661736 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cc2c7b9-8384-480a-b49c-81b028f55f56-operator-scripts\") pod \"keystonefb32-account-delete-bnkht\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.661791 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srtvc\" (UniqueName: \"kubernetes.io/projected/2cc2c7b9-8384-480a-b49c-81b028f55f56-kube-api-access-srtvc\") pod \"keystonefb32-account-delete-bnkht\" (UID: 
\"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.663027 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cc2c7b9-8384-480a-b49c-81b028f55f56-operator-scripts\") pod \"keystonefb32-account-delete-bnkht\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.684055 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srtvc\" (UniqueName: \"kubernetes.io/projected/2cc2c7b9-8384-480a-b49c-81b028f55f56-kube-api-access-srtvc\") pod \"keystonefb32-account-delete-bnkht\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:39 crc kubenswrapper[4734]: I1125 09:48:39.742955 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:40 crc kubenswrapper[4734]: I1125 09:48:40.158712 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystonefb32-account-delete-bnkht"] Nov 25 09:48:40 crc kubenswrapper[4734]: I1125 09:48:40.260006 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bb041e1-7942-46d6-b64e-64c4de2cc86d" path="/var/lib/kubelet/pods/2bb041e1-7942-46d6-b64e-64c4de2cc86d/volumes" Nov 25 09:48:40 crc kubenswrapper[4734]: I1125 09:48:40.260987 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d956d9d8-006f-4404-9917-0178e765a9dc" path="/var/lib/kubelet/pods/d956d9d8-006f-4404-9917-0178e765a9dc/volumes" Nov 25 09:48:40 crc kubenswrapper[4734]: I1125 09:48:40.261658 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff36379-a812-41c5-ad89-4d0e9065bfe5" path="/var/lib/kubelet/pods/dff36379-a812-41c5-ad89-4d0e9065bfe5/volumes" Nov 25 09:48:40 crc kubenswrapper[4734]: I1125 09:48:40.323221 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" event={"ID":"2cc2c7b9-8384-480a-b49c-81b028f55f56","Type":"ContainerStarted","Data":"4994d0a215ddaf0c6504fa18c37ad4e0df491b13ab084f1434c0344e0e058aea"} Nov 25 09:48:41 crc kubenswrapper[4734]: I1125 09:48:41.330510 4734 generic.go:334] "Generic (PLEG): container finished" podID="2cc2c7b9-8384-480a-b49c-81b028f55f56" containerID="40ed7dd79ced8b6e86bbc1ea93dc314d6703467b73066fa8acbd2386f8d03846" exitCode=0 Nov 25 09:48:41 crc kubenswrapper[4734]: I1125 09:48:41.330616 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" event={"ID":"2cc2c7b9-8384-480a-b49c-81b028f55f56","Type":"ContainerDied","Data":"40ed7dd79ced8b6e86bbc1ea93dc314d6703467b73066fa8acbd2386f8d03846"} Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.579497 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.705260 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cc2c7b9-8384-480a-b49c-81b028f55f56-operator-scripts\") pod \"2cc2c7b9-8384-480a-b49c-81b028f55f56\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.705326 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srtvc\" (UniqueName: \"kubernetes.io/projected/2cc2c7b9-8384-480a-b49c-81b028f55f56-kube-api-access-srtvc\") pod \"2cc2c7b9-8384-480a-b49c-81b028f55f56\" (UID: \"2cc2c7b9-8384-480a-b49c-81b028f55f56\") " Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.705820 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cc2c7b9-8384-480a-b49c-81b028f55f56-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2cc2c7b9-8384-480a-b49c-81b028f55f56" (UID: "2cc2c7b9-8384-480a-b49c-81b028f55f56"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.710348 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cc2c7b9-8384-480a-b49c-81b028f55f56-kube-api-access-srtvc" (OuterVolumeSpecName: "kube-api-access-srtvc") pod "2cc2c7b9-8384-480a-b49c-81b028f55f56" (UID: "2cc2c7b9-8384-480a-b49c-81b028f55f56"). InnerVolumeSpecName "kube-api-access-srtvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.808199 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2cc2c7b9-8384-480a-b49c-81b028f55f56-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:42 crc kubenswrapper[4734]: I1125 09:48:42.808239 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srtvc\" (UniqueName: \"kubernetes.io/projected/2cc2c7b9-8384-480a-b49c-81b028f55f56-kube-api-access-srtvc\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:43 crc kubenswrapper[4734]: I1125 09:48:43.359687 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" event={"ID":"2cc2c7b9-8384-480a-b49c-81b028f55f56","Type":"ContainerDied","Data":"4994d0a215ddaf0c6504fa18c37ad4e0df491b13ab084f1434c0344e0e058aea"} Nov 25 09:48:43 crc kubenswrapper[4734]: I1125 09:48:43.360063 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4994d0a215ddaf0c6504fa18c37ad4e0df491b13ab084f1434c0344e0e058aea" Nov 25 09:48:43 crc kubenswrapper[4734]: I1125 09:48:43.359762 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystonefb32-account-delete-bnkht" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.446451 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkqft"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.463024 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkqft"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.470257 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystonefb32-account-delete-bnkht"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.472425 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.476016 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystonefb32-account-delete-bnkht"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.479849 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-fb32-account-create-update-m8jp8"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.535429 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-create-6ssj6"] Nov 25 09:48:44 crc kubenswrapper[4734]: E1125 09:48:44.535712 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cc2c7b9-8384-480a-b49c-81b028f55f56" containerName="mariadb-account-delete" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.535726 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cc2c7b9-8384-480a-b49c-81b028f55f56" containerName="mariadb-account-delete" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.535888 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cc2c7b9-8384-480a-b49c-81b028f55f56" containerName="mariadb-account-delete" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.536378 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.546063 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-6ssj6"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.634849 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlgvr\" (UniqueName: \"kubernetes.io/projected/45995a25-2249-4f80-8f19-9dd95343d93b-kube-api-access-nlgvr\") pod \"keystone-db-create-6ssj6\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") " pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.634898 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45995a25-2249-4f80-8f19-9dd95343d93b-operator-scripts\") pod \"keystone-db-create-6ssj6\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") " pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.640830 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.641734 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.643635 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-db-secret" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.653287 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"] Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.736391 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa726887-07d2-4e56-ae70-c21f72b81e34-operator-scripts\") pod \"keystone-cfaa-account-create-update-ngxcx\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") " pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.736483 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlgvr\" (UniqueName: \"kubernetes.io/projected/45995a25-2249-4f80-8f19-9dd95343d93b-kube-api-access-nlgvr\") pod \"keystone-db-create-6ssj6\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") " pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.736508 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45995a25-2249-4f80-8f19-9dd95343d93b-operator-scripts\") pod \"keystone-db-create-6ssj6\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") " pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.736530 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfzw8\" (UniqueName: \"kubernetes.io/projected/aa726887-07d2-4e56-ae70-c21f72b81e34-kube-api-access-vfzw8\") pod \"keystone-cfaa-account-create-update-ngxcx\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") " pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.737502 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45995a25-2249-4f80-8f19-9dd95343d93b-operator-scripts\") pod \"keystone-db-create-6ssj6\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") " pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.755343 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlgvr\" (UniqueName: \"kubernetes.io/projected/45995a25-2249-4f80-8f19-9dd95343d93b-kube-api-access-nlgvr\") pod \"keystone-db-create-6ssj6\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") " pod="keystone-kuttl-tests/keystone-db-create-6ssj6" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.837760 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfzw8\" (UniqueName: \"kubernetes.io/projected/aa726887-07d2-4e56-ae70-c21f72b81e34-kube-api-access-vfzw8\") pod \"keystone-cfaa-account-create-update-ngxcx\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") " pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.837886 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa726887-07d2-4e56-ae70-c21f72b81e34-operator-scripts\") pod \"keystone-cfaa-account-create-update-ngxcx\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") " pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"
Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.838710 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa726887-07d2-4e56-ae70-c21f72b81e34-operator-scripts\") pod \"keystone-cfaa-account-create-update-ngxcx\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") " pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"
Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.851589 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-6ssj6"
Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.854784 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfzw8\" (UniqueName: \"kubernetes.io/projected/aa726887-07d2-4e56-ae70-c21f72b81e34-kube-api-access-vfzw8\") pod \"keystone-cfaa-account-create-update-ngxcx\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") " pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"
Nov 25 09:48:44 crc kubenswrapper[4734]: I1125 09:48:44.961782 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.070159 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-6ssj6"]
Nov 25 09:48:45 crc kubenswrapper[4734]: W1125 09:48:45.085828 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45995a25_2249_4f80_8f19_9dd95343d93b.slice/crio-f9c41421dc8fdd6a0b09867449adb66013b9cc3b80d354d90ea37b630438673e WatchSource:0}: Error finding container f9c41421dc8fdd6a0b09867449adb66013b9cc3b80d354d90ea37b630438673e: Status 404 returned error can't find the container with id f9c41421dc8fdd6a0b09867449adb66013b9cc3b80d354d90ea37b630438673e
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.188393 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"]
Nov 25 09:48:45 crc kubenswrapper[4734]: W1125 09:48:45.194037 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa726887_07d2_4e56_ae70_c21f72b81e34.slice/crio-bd4eaa9f1dc7527ad1a7c2fdb4d281e58222f5406c33e2d37f5ebc94b5658625 WatchSource:0}: Error finding container bd4eaa9f1dc7527ad1a7c2fdb4d281e58222f5406c33e2d37f5ebc94b5658625: Status 404 returned error can't find the container with id bd4eaa9f1dc7527ad1a7c2fdb4d281e58222f5406c33e2d37f5ebc94b5658625
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.375569 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-6ssj6" event={"ID":"45995a25-2249-4f80-8f19-9dd95343d93b","Type":"ContainerStarted","Data":"ce45562aeb357c41614baf1b0c4278ccf57da7fd84deb334076d6978c5f6b348"}
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.375634 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-6ssj6" event={"ID":"45995a25-2249-4f80-8f19-9dd95343d93b","Type":"ContainerStarted","Data":"f9c41421dc8fdd6a0b09867449adb66013b9cc3b80d354d90ea37b630438673e"}
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.377436 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" event={"ID":"aa726887-07d2-4e56-ae70-c21f72b81e34","Type":"ContainerStarted","Data":"d0761cd256811635f37219d3ff8777c7e853b0800f1f1ddb642249dfe3096c67"}
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.377480 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" event={"ID":"aa726887-07d2-4e56-ae70-c21f72b81e34","Type":"ContainerStarted","Data":"bd4eaa9f1dc7527ad1a7c2fdb4d281e58222f5406c33e2d37f5ebc94b5658625"}
Nov 25 09:48:45 crc kubenswrapper[4734]: I1125 09:48:45.394264 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-create-6ssj6" podStartSLOduration=1.394241171 podStartE2EDuration="1.394241171s" podCreationTimestamp="2025-11-25 09:48:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:45.3886325 +0000 UTC m=+1248.199094494" watchObservedRunningTime="2025-11-25 09:48:45.394241171 +0000 UTC m=+1248.204703175"
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.255341 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27fa64dc-7838-4fef-9a31-4033be9913df" path="/var/lib/kubelet/pods/27fa64dc-7838-4fef-9a31-4033be9913df/volumes"
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.256309 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cc2c7b9-8384-480a-b49c-81b028f55f56" path="/var/lib/kubelet/pods/2cc2c7b9-8384-480a-b49c-81b028f55f56/volumes"
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.257014 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb37b695-6599-4b41-b675-ec6ffa470d35" path="/var/lib/kubelet/pods/eb37b695-6599-4b41-b675-ec6ffa470d35/volumes"
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.387850 4734 generic.go:334] "Generic (PLEG): container finished" podID="45995a25-2249-4f80-8f19-9dd95343d93b" containerID="ce45562aeb357c41614baf1b0c4278ccf57da7fd84deb334076d6978c5f6b348" exitCode=0
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.387928 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-6ssj6" event={"ID":"45995a25-2249-4f80-8f19-9dd95343d93b","Type":"ContainerDied","Data":"ce45562aeb357c41614baf1b0c4278ccf57da7fd84deb334076d6978c5f6b348"}
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.389914 4734 generic.go:334] "Generic (PLEG): container finished" podID="aa726887-07d2-4e56-ae70-c21f72b81e34" containerID="d0761cd256811635f37219d3ff8777c7e853b0800f1f1ddb642249dfe3096c67" exitCode=0
Nov 25 09:48:46 crc kubenswrapper[4734]: I1125 09:48:46.389983 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" event={"ID":"aa726887-07d2-4e56-ae70-c21f72b81e34","Type":"ContainerDied","Data":"d0761cd256811635f37219d3ff8777c7e853b0800f1f1ddb642249dfe3096c67"}
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.677918 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.682849 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-6ssj6"
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.785354 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa726887-07d2-4e56-ae70-c21f72b81e34-operator-scripts\") pod \"aa726887-07d2-4e56-ae70-c21f72b81e34\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") "
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.785492 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlgvr\" (UniqueName: \"kubernetes.io/projected/45995a25-2249-4f80-8f19-9dd95343d93b-kube-api-access-nlgvr\") pod \"45995a25-2249-4f80-8f19-9dd95343d93b\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") "
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.785563 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vfzw8\" (UniqueName: \"kubernetes.io/projected/aa726887-07d2-4e56-ae70-c21f72b81e34-kube-api-access-vfzw8\") pod \"aa726887-07d2-4e56-ae70-c21f72b81e34\" (UID: \"aa726887-07d2-4e56-ae70-c21f72b81e34\") "
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.785597 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45995a25-2249-4f80-8f19-9dd95343d93b-operator-scripts\") pod \"45995a25-2249-4f80-8f19-9dd95343d93b\" (UID: \"45995a25-2249-4f80-8f19-9dd95343d93b\") "
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.785911 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa726887-07d2-4e56-ae70-c21f72b81e34-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aa726887-07d2-4e56-ae70-c21f72b81e34" (UID: "aa726887-07d2-4e56-ae70-c21f72b81e34"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.786323 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45995a25-2249-4f80-8f19-9dd95343d93b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "45995a25-2249-4f80-8f19-9dd95343d93b" (UID: "45995a25-2249-4f80-8f19-9dd95343d93b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.791311 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45995a25-2249-4f80-8f19-9dd95343d93b-kube-api-access-nlgvr" (OuterVolumeSpecName: "kube-api-access-nlgvr") pod "45995a25-2249-4f80-8f19-9dd95343d93b" (UID: "45995a25-2249-4f80-8f19-9dd95343d93b"). InnerVolumeSpecName "kube-api-access-nlgvr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.791790 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa726887-07d2-4e56-ae70-c21f72b81e34-kube-api-access-vfzw8" (OuterVolumeSpecName: "kube-api-access-vfzw8") pod "aa726887-07d2-4e56-ae70-c21f72b81e34" (UID: "aa726887-07d2-4e56-ae70-c21f72b81e34"). InnerVolumeSpecName "kube-api-access-vfzw8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.888103 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vfzw8\" (UniqueName: \"kubernetes.io/projected/aa726887-07d2-4e56-ae70-c21f72b81e34-kube-api-access-vfzw8\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.888144 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45995a25-2249-4f80-8f19-9dd95343d93b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.888156 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa726887-07d2-4e56-ae70-c21f72b81e34-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:47 crc kubenswrapper[4734]: I1125 09:48:47.888167 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlgvr\" (UniqueName: \"kubernetes.io/projected/45995a25-2249-4f80-8f19-9dd95343d93b-kube-api-access-nlgvr\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:48 crc kubenswrapper[4734]: I1125 09:48:48.403573 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-6ssj6" event={"ID":"45995a25-2249-4f80-8f19-9dd95343d93b","Type":"ContainerDied","Data":"f9c41421dc8fdd6a0b09867449adb66013b9cc3b80d354d90ea37b630438673e"}
Nov 25 09:48:48 crc kubenswrapper[4734]: I1125 09:48:48.403616 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9c41421dc8fdd6a0b09867449adb66013b9cc3b80d354d90ea37b630438673e"
Nov 25 09:48:48 crc kubenswrapper[4734]: I1125 09:48:48.403589 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-6ssj6"
Nov 25 09:48:48 crc kubenswrapper[4734]: I1125 09:48:48.405942 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx" event={"ID":"aa726887-07d2-4e56-ae70-c21f72b81e34","Type":"ContainerDied","Data":"bd4eaa9f1dc7527ad1a7c2fdb4d281e58222f5406c33e2d37f5ebc94b5658625"}
Nov 25 09:48:48 crc kubenswrapper[4734]: I1125 09:48:48.406025 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd4eaa9f1dc7527ad1a7c2fdb4d281e58222f5406c33e2d37f5ebc94b5658625"
Nov 25 09:48:48 crc kubenswrapper[4734]: I1125 09:48:48.406064 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.294372 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-2jzk5"]
Nov 25 09:48:50 crc kubenswrapper[4734]: E1125 09:48:50.294884 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa726887-07d2-4e56-ae70-c21f72b81e34" containerName="mariadb-account-create-update"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.294898 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa726887-07d2-4e56-ae70-c21f72b81e34" containerName="mariadb-account-create-update"
Nov 25 09:48:50 crc kubenswrapper[4734]: E1125 09:48:50.294917 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45995a25-2249-4f80-8f19-9dd95343d93b" containerName="mariadb-database-create"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.294923 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="45995a25-2249-4f80-8f19-9dd95343d93b" containerName="mariadb-database-create"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.295052 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="45995a25-2249-4f80-8f19-9dd95343d93b" containerName="mariadb-database-create"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.295064 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa726887-07d2-4e56-ae70-c21f72b81e34" containerName="mariadb-account-create-update"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.295469 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.297418 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.297863 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.298345 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"combined-ca-bundle"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.298764 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-mglb9"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.299300 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.317379 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-2jzk5"]
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.425501 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-config-data\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.425609 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-combined-ca-bundle\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.425650 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j678g\" (UniqueName: \"kubernetes.io/projected/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-kube-api-access-j678g\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.527228 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-config-data\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.527307 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-combined-ca-bundle\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.527331 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j678g\" (UniqueName: \"kubernetes.io/projected/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-kube-api-access-j678g\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.532634 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-combined-ca-bundle\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.532977 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-config-data\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.545013 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j678g\" (UniqueName: \"kubernetes.io/projected/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-kube-api-access-j678g\") pod \"keystone-db-sync-2jzk5\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") " pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.613217 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:50 crc kubenswrapper[4734]: I1125 09:48:50.804000 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-2jzk5"]
Nov 25 09:48:51 crc kubenswrapper[4734]: I1125 09:48:51.424873 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5" event={"ID":"a36997ba-eaa6-4539-ba90-a1b9d147fc2d","Type":"ContainerStarted","Data":"008686f232377773a367a2e6083e9a4d2d5076bfad0694013ffd795dbd5af98c"}
Nov 25 09:48:51 crc kubenswrapper[4734]: I1125 09:48:51.425223 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5" event={"ID":"a36997ba-eaa6-4539-ba90-a1b9d147fc2d","Type":"ContainerStarted","Data":"30035ca3444924fcd04657e1026ddd8a779fc4ad2037c6a20436a4db7b613fa6"}
Nov 25 09:48:51 crc kubenswrapper[4734]: I1125 09:48:51.446620 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5" podStartSLOduration=1.446597358 podStartE2EDuration="1.446597358s" podCreationTimestamp="2025-11-25 09:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:51.443512539 +0000 UTC m=+1254.253974533" watchObservedRunningTime="2025-11-25 09:48:51.446597358 +0000 UTC m=+1254.257059392"
Nov 25 09:48:53 crc kubenswrapper[4734]: I1125 09:48:53.441908 4734 generic.go:334] "Generic (PLEG): container finished" podID="a36997ba-eaa6-4539-ba90-a1b9d147fc2d" containerID="008686f232377773a367a2e6083e9a4d2d5076bfad0694013ffd795dbd5af98c" exitCode=0
Nov 25 09:48:53 crc kubenswrapper[4734]: I1125 09:48:53.442031 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5" event={"ID":"a36997ba-eaa6-4539-ba90-a1b9d147fc2d","Type":"ContainerDied","Data":"008686f232377773a367a2e6083e9a4d2d5076bfad0694013ffd795dbd5af98c"}
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.740194 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.790023 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-config-data\") pod \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") "
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.790128 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-combined-ca-bundle\") pod \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") "
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.790181 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j678g\" (UniqueName: \"kubernetes.io/projected/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-kube-api-access-j678g\") pod \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\" (UID: \"a36997ba-eaa6-4539-ba90-a1b9d147fc2d\") "
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.796427 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-kube-api-access-j678g" (OuterVolumeSpecName: "kube-api-access-j678g") pod "a36997ba-eaa6-4539-ba90-a1b9d147fc2d" (UID: "a36997ba-eaa6-4539-ba90-a1b9d147fc2d"). InnerVolumeSpecName "kube-api-access-j678g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.808962 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a36997ba-eaa6-4539-ba90-a1b9d147fc2d" (UID: "a36997ba-eaa6-4539-ba90-a1b9d147fc2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.822683 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-config-data" (OuterVolumeSpecName: "config-data") pod "a36997ba-eaa6-4539-ba90-a1b9d147fc2d" (UID: "a36997ba-eaa6-4539-ba90-a1b9d147fc2d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.891613 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.891818 4734 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:54 crc kubenswrapper[4734]: I1125 09:48:54.891923 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j678g\" (UniqueName: \"kubernetes.io/projected/a36997ba-eaa6-4539-ba90-a1b9d147fc2d-kube-api-access-j678g\") on node \"crc\" DevicePath \"\""
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.458074 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5" event={"ID":"a36997ba-eaa6-4539-ba90-a1b9d147fc2d","Type":"ContainerDied","Data":"30035ca3444924fcd04657e1026ddd8a779fc4ad2037c6a20436a4db7b613fa6"}
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.458181 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30035ca3444924fcd04657e1026ddd8a779fc4ad2037c6a20436a4db7b613fa6"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.458441 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-2jzk5"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.641783 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-4fssf"]
Nov 25 09:48:55 crc kubenswrapper[4734]: E1125 09:48:55.643268 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36997ba-eaa6-4539-ba90-a1b9d147fc2d" containerName="keystone-db-sync"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.643303 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="a36997ba-eaa6-4539-ba90-a1b9d147fc2d" containerName="keystone-db-sync"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.643539 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="a36997ba-eaa6-4539-ba90-a1b9d147fc2d" containerName="keystone-db-sync"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.644150 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.647134 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"combined-ca-bundle"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.647435 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.647463 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.647839 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-mglb9"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.648053 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"osp-secret"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.648263 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.659438 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-4fssf"]
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.703368 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-config-data\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.703461 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-fernet-keys\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.703873 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-scripts\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.704004 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjh4f\" (UniqueName: \"kubernetes.io/projected/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-kube-api-access-fjh4f\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.704205 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-combined-ca-bundle\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.704518 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-credential-keys\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.806618 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-combined-ca-bundle\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.807034 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-credential-keys\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.807689 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-config-data\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.807807 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-fernet-keys\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.807881 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-scripts\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.807937 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjh4f\" (UniqueName: \"kubernetes.io/projected/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-kube-api-access-fjh4f\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.812215 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-scripts\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.813168 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-config-data\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.813519 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-credential-keys\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.821768 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-combined-ca-bundle\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.822004 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-fernet-keys\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.826724 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjh4f\" (UniqueName: \"kubernetes.io/projected/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-kube-api-access-fjh4f\") pod \"keystone-bootstrap-4fssf\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") " pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:55 crc kubenswrapper[4734]: I1125 09:48:55.967212 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:48:56 crc kubenswrapper[4734]: I1125 09:48:56.390532 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-4fssf"]
Nov 25 09:48:56 crc kubenswrapper[4734]: I1125 09:48:56.465803 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf" event={"ID":"f626b2c0-663c-49e0-b5ad-fdc1edec33c9","Type":"ContainerStarted","Data":"f82106f11ee967ea9725a201af0ce4090b35ddfbece1ef5cafdb8da6f1680ab7"}
Nov 25 09:48:57 crc kubenswrapper[4734]: I1125 09:48:57.475937 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf" event={"ID":"f626b2c0-663c-49e0-b5ad-fdc1edec33c9","Type":"ContainerStarted","Data":"eebb77b50be36935e18d60033a275119e3727c421e585d5c319b5eaadc424c13"}
Nov 25 09:48:57 crc kubenswrapper[4734]: I1125 09:48:57.505954 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf" podStartSLOduration=2.505932335 podStartE2EDuration="2.505932335s" podCreationTimestamp="2025-11-25 09:48:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:57.501987182 +0000 UTC m=+1260.312449176" watchObservedRunningTime="2025-11-25 09:48:57.505932335 +0000 UTC m=+1260.316394339"
Nov 25 09:48:59 crc kubenswrapper[4734]: I1125 09:48:59.490541 4734 generic.go:334] "Generic (PLEG): container finished" podID="f626b2c0-663c-49e0-b5ad-fdc1edec33c9" containerID="eebb77b50be36935e18d60033a275119e3727c421e585d5c319b5eaadc424c13" exitCode=0
Nov 25 09:48:59 crc kubenswrapper[4734]: I1125 09:48:59.490634 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf" event={"ID":"f626b2c0-663c-49e0-b5ad-fdc1edec33c9","Type":"ContainerDied","Data":"eebb77b50be36935e18d60033a275119e3727c421e585d5c319b5eaadc424c13"}
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.634127 4734 scope.go:117] "RemoveContainer" containerID="55f84f35832beb9984b2bd4c4183762d4899b694ebe0b554a338b7970d7b0cd7"
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.694906 4734 scope.go:117] "RemoveContainer" containerID="8e76fb6a2e214acbac8fd404dd9a5eaf844ed20edeae7fbfdc73fa6fc2c093be"
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.728182 4734 scope.go:117] "RemoveContainer" containerID="544af142c8c371d6c344824647851e5725e1c072d2fc2d9aa100bf6cf491866f"
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.833035 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.903647 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-combined-ca-bundle\") pod \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") "
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.903728 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-credential-keys\") pod \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") "
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.903751 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-scripts\") pod \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") "
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.903809 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-fernet-keys\") pod \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") "
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.903834 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-config-data\") pod \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") "
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.903879 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjh4f\" (UniqueName: \"kubernetes.io/projected/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-kube-api-access-fjh4f\") pod \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\" (UID: \"f626b2c0-663c-49e0-b5ad-fdc1edec33c9\") "
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.909982 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-kube-api-access-fjh4f" (OuterVolumeSpecName: "kube-api-access-fjh4f") pod "f626b2c0-663c-49e0-b5ad-fdc1edec33c9" (UID: "f626b2c0-663c-49e0-b5ad-fdc1edec33c9"). InnerVolumeSpecName "kube-api-access-fjh4f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.910299 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-scripts" (OuterVolumeSpecName: "scripts") pod "f626b2c0-663c-49e0-b5ad-fdc1edec33c9" (UID: "f626b2c0-663c-49e0-b5ad-fdc1edec33c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.910378 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "f626b2c0-663c-49e0-b5ad-fdc1edec33c9" (UID: "f626b2c0-663c-49e0-b5ad-fdc1edec33c9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.910544 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "f626b2c0-663c-49e0-b5ad-fdc1edec33c9" (UID: "f626b2c0-663c-49e0-b5ad-fdc1edec33c9"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.925909 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f626b2c0-663c-49e0-b5ad-fdc1edec33c9" (UID: "f626b2c0-663c-49e0-b5ad-fdc1edec33c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:00 crc kubenswrapper[4734]: I1125 09:49:00.926310 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-config-data" (OuterVolumeSpecName: "config-data") pod "f626b2c0-663c-49e0-b5ad-fdc1edec33c9" (UID: "f626b2c0-663c-49e0-b5ad-fdc1edec33c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.005468 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.005536 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.005559 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.005573 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjh4f\" (UniqueName: \"kubernetes.io/projected/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-kube-api-access-fjh4f\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.005695 4734 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.005715 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f626b2c0-663c-49e0-b5ad-fdc1edec33c9-credential-keys\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.506170 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf" event={"ID":"f626b2c0-663c-49e0-b5ad-fdc1edec33c9","Type":"ContainerDied","Data":"f82106f11ee967ea9725a201af0ce4090b35ddfbece1ef5cafdb8da6f1680ab7"}
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.506213 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f82106f11ee967ea9725a201af0ce4090b35ddfbece1ef5cafdb8da6f1680ab7"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.506222 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-4fssf"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.931627 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-5f67bc6985-689b7"]
Nov 25 09:49:01 crc kubenswrapper[4734]: E1125 09:49:01.933668 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f626b2c0-663c-49e0-b5ad-fdc1edec33c9" containerName="keystone-bootstrap"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.933758 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f626b2c0-663c-49e0-b5ad-fdc1edec33c9" containerName="keystone-bootstrap"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.933989 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f626b2c0-663c-49e0-b5ad-fdc1edec33c9" containerName="keystone-bootstrap"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.934852 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.940745 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.942211 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"cert-keystone-internal-svc"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.943201 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"cert-keystone-public-svc"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.947705 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.949289 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-mglb9"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.950626 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"combined-ca-bundle"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.955039 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data"
Nov 25 09:49:01 crc kubenswrapper[4734]: I1125 09:49:01.958890 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-5f67bc6985-689b7"]
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020194 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-credential-keys\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020248 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r947q\" (UniqueName: \"kubernetes.io/projected/724b6630-d284-4fbe-83b9-96cd1634248f-kube-api-access-r947q\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020276 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-internal-tls-certs\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020393 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-config-data\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020440 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-public-tls-certs\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020480 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-combined-ca-bundle\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020529 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-scripts\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.020553 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-fernet-keys\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.121924 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-fernet-keys\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.121978 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-scripts\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.122012 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-credential-keys\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.122063 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r947q\" (UniqueName: \"kubernetes.io/projected/724b6630-d284-4fbe-83b9-96cd1634248f-kube-api-access-r947q\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.122122 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-internal-tls-certs\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.122190 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-config-data\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.122216 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-public-tls-certs\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.122251 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-combined-ca-bundle\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.127499 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-credential-keys\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.127574 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-config-data\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.128036 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-combined-ca-bundle\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.128568 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-scripts\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.128591 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-fernet-keys\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.129517 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-internal-tls-certs\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.130100 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-public-tls-certs\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.142567 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r947q\" (UniqueName: \"kubernetes.io/projected/724b6630-d284-4fbe-83b9-96cd1634248f-kube-api-access-r947q\") pod \"keystone-5f67bc6985-689b7\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") " pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.281124 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:02 crc kubenswrapper[4734]: I1125 09:49:02.695662 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-5f67bc6985-689b7"]
Nov 25 09:49:03 crc kubenswrapper[4734]: I1125 09:49:03.528703 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" event={"ID":"724b6630-d284-4fbe-83b9-96cd1634248f","Type":"ContainerStarted","Data":"bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2"}
Nov 25 09:49:03 crc kubenswrapper[4734]: I1125 09:49:03.529009 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" event={"ID":"724b6630-d284-4fbe-83b9-96cd1634248f","Type":"ContainerStarted","Data":"0a2053fde339cec968caad9c92fb8115571ea53cd30b68883259f9bea067d4d1"}
Nov 25 09:49:03 crc kubenswrapper[4734]: I1125 09:49:03.529025 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:03 crc kubenswrapper[4734]: I1125 09:49:03.553389 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" podStartSLOduration=2.553368001 podStartE2EDuration="2.553368001s" podCreationTimestamp="2025-11-25 09:49:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:03.549358366 +0000 UTC m=+1266.359820360" watchObservedRunningTime="2025-11-25 09:49:03.553368001 +0000 UTC m=+1266.363829995"
Nov 25 09:49:33 crc kubenswrapper[4734]: I1125 09:49:33.793964 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.475031 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-2jzk5"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.480313 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-4fssf"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.485490 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-2jzk5"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.491475 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-4fssf"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.508757 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-5f67bc6985-689b7"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.546179 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystonecfaa-account-delete-qndng"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.547281 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.555583 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystonecfaa-account-delete-qndng"]
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.576677 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spblb\" (UniqueName: \"kubernetes.io/projected/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-kube-api-access-spblb\") pod \"keystonecfaa-account-delete-qndng\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") " pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.576943 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-operator-scripts\") pod \"keystonecfaa-account-delete-qndng\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") " pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.679031 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spblb\" (UniqueName: \"kubernetes.io/projected/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-kube-api-access-spblb\") pod \"keystonecfaa-account-delete-qndng\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") " pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.679119 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-operator-scripts\") pod \"keystonecfaa-account-delete-qndng\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") " pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.680164 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-operator-scripts\") pod \"keystonecfaa-account-delete-qndng\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") " pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.699045 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spblb\" (UniqueName: \"kubernetes.io/projected/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-kube-api-access-spblb\") pod \"keystonecfaa-account-delete-qndng\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") " pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.731163 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" podUID="724b6630-d284-4fbe-83b9-96cd1634248f" containerName="keystone-api" containerID="cri-o://bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2" gracePeriod=30
Nov 25 09:49:34 crc kubenswrapper[4734]: I1125 09:49:34.909954 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:35 crc kubenswrapper[4734]: I1125 09:49:35.317098 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystonecfaa-account-delete-qndng"]
Nov 25 09:49:35 crc kubenswrapper[4734]: I1125 09:49:35.743854 4734 generic.go:334] "Generic (PLEG): container finished" podID="cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" containerID="352f24bb79461d5435843c53905eb5371b1c4ac39477d06bf848480839b99c2b" exitCode=0
Nov 25 09:49:35 crc kubenswrapper[4734]: I1125 09:49:35.743912 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng" event={"ID":"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d","Type":"ContainerDied","Data":"352f24bb79461d5435843c53905eb5371b1c4ac39477d06bf848480839b99c2b"}
Nov 25 09:49:35 crc kubenswrapper[4734]: I1125 09:49:35.744231 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng" event={"ID":"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d","Type":"ContainerStarted","Data":"b2cc8f92eb1ea02418dca88fff39b54dd8077c025834a139762fdf33b49b8d4d"}
Nov 25 09:49:36 crc kubenswrapper[4734]: I1125 09:49:36.254611 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a36997ba-eaa6-4539-ba90-a1b9d147fc2d" path="/var/lib/kubelet/pods/a36997ba-eaa6-4539-ba90-a1b9d147fc2d/volumes"
Nov 25 09:49:36 crc kubenswrapper[4734]: I1125 09:49:36.255343 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f626b2c0-663c-49e0-b5ad-fdc1edec33c9" path="/var/lib/kubelet/pods/f626b2c0-663c-49e0-b5ad-fdc1edec33c9/volumes"
Nov 25 09:49:36 crc kubenswrapper[4734]: I1125 09:49:36.976689 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.012507 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-operator-scripts\") pod \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") "
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.012583 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spblb\" (UniqueName: \"kubernetes.io/projected/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-kube-api-access-spblb\") pod \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\" (UID: \"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d\") "
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.014240 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" (UID: "cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.020536 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-kube-api-access-spblb" (OuterVolumeSpecName: "kube-api-access-spblb") pod "cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" (UID: "cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d"). InnerVolumeSpecName "kube-api-access-spblb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.113632 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spblb\" (UniqueName: \"kubernetes.io/projected/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-kube-api-access-spblb\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.113663 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.759260 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng" event={"ID":"cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d","Type":"ContainerDied","Data":"b2cc8f92eb1ea02418dca88fff39b54dd8077c025834a139762fdf33b49b8d4d"}
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.759622 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2cc8f92eb1ea02418dca88fff39b54dd8077c025834a139762fdf33b49b8d4d"
Nov 25 09:49:37 crc kubenswrapper[4734]: I1125 09:49:37.759392 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystonecfaa-account-delete-qndng"
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.121775 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7"
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228477 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-public-tls-certs\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228542 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-combined-ca-bundle\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228572 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-fernet-keys\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228631 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-credential-keys\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228785 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-scripts\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228809 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-config-data\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228835 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r947q\" (UniqueName: \"kubernetes.io/projected/724b6630-d284-4fbe-83b9-96cd1634248f-kube-api-access-r947q\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.228869 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-internal-tls-certs\") pod \"724b6630-d284-4fbe-83b9-96cd1634248f\" (UID: \"724b6630-d284-4fbe-83b9-96cd1634248f\") "
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.233216 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-scripts" (OuterVolumeSpecName: "scripts") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.233540 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.233560 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.242266 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/724b6630-d284-4fbe-83b9-96cd1634248f-kube-api-access-r947q" (OuterVolumeSpecName: "kube-api-access-r947q") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "kube-api-access-r947q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.250279 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-config-data" (OuterVolumeSpecName: "config-data") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.251118 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.266955 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.266983 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "724b6630-d284-4fbe-83b9-96cd1634248f" (UID: "724b6630-d284-4fbe-83b9-96cd1634248f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330424 4734 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330463 4734 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330473 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330481 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330490 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330503 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330514 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r947q\" (UniqueName: \"kubernetes.io/projected/724b6630-d284-4fbe-83b9-96cd1634248f-kube-api-access-r947q\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.330526 4734 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/724b6630-d284-4fbe-83b9-96cd1634248f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.782403 4734 generic.go:334] "Generic (PLEG): container finished" podID="724b6630-d284-4fbe-83b9-96cd1634248f" containerID="bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2" exitCode=0 Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.782446 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" 
event={"ID":"724b6630-d284-4fbe-83b9-96cd1634248f","Type":"ContainerDied","Data":"bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2"} Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.782462 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.782483 4734 scope.go:117] "RemoveContainer" containerID="bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.782472 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-5f67bc6985-689b7" event={"ID":"724b6630-d284-4fbe-83b9-96cd1634248f","Type":"ContainerDied","Data":"0a2053fde339cec968caad9c92fb8115571ea53cd30b68883259f9bea067d4d1"} Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.813930 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-5f67bc6985-689b7"] Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.816899 4734 scope.go:117] "RemoveContainer" containerID="bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2" Nov 25 09:49:38 crc kubenswrapper[4734]: E1125 09:49:38.817229 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2\": container with ID starting with bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2 not found: ID does not exist" containerID="bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.817355 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2"} err="failed to get container status \"bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2\": rpc error: code = NotFound desc = could not find container \"bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2\": container with ID starting with bc6909784f4f0f12e1fe5d18eed2cd45705c1af031763f5a0b57c79e7aadf6d2 not found: ID does not exist" Nov 25 09:49:38 crc kubenswrapper[4734]: I1125 09:49:38.819055 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-5f67bc6985-689b7"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.577006 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-6ssj6"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.584156 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-6ssj6"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.589581 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.594444 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-cfaa-account-create-update-ngxcx"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.598498 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystonecfaa-account-delete-qndng"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.602244 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystonecfaa-account-delete-qndng"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 
09:49:39.815386 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-create-2pgwg"] Nov 25 09:49:39 crc kubenswrapper[4734]: E1125 09:49:39.815696 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" containerName="mariadb-account-delete" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.815710 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" containerName="mariadb-account-delete" Nov 25 09:49:39 crc kubenswrapper[4734]: E1125 09:49:39.815730 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="724b6630-d284-4fbe-83b9-96cd1634248f" containerName="keystone-api" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.815738 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="724b6630-d284-4fbe-83b9-96cd1634248f" containerName="keystone-api" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.815844 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" containerName="mariadb-account-delete" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.815859 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="724b6630-d284-4fbe-83b9-96cd1634248f" containerName="keystone-api" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.816452 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.822534 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-4038-account-create-update-whf6w"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.823678 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.825643 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-db-secret" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.829234 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-2pgwg"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.834380 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-4038-account-create-update-whf6w"] Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.848440 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-operator-scripts\") pod \"keystone-4038-account-create-update-whf6w\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.848507 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmm65\" (UniqueName: \"kubernetes.io/projected/1273e204-b821-42e0-a481-a0709c74e28a-kube-api-access-pmm65\") pod \"keystone-db-create-2pgwg\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.848549 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1273e204-b821-42e0-a481-a0709c74e28a-operator-scripts\") pod \"keystone-db-create-2pgwg\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.848585 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9fbj\" (UniqueName: \"kubernetes.io/projected/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-kube-api-access-l9fbj\") pod \"keystone-4038-account-create-update-whf6w\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.950004 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmm65\" (UniqueName: \"kubernetes.io/projected/1273e204-b821-42e0-a481-a0709c74e28a-kube-api-access-pmm65\") pod \"keystone-db-create-2pgwg\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.950134 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1273e204-b821-42e0-a481-a0709c74e28a-operator-scripts\") pod \"keystone-db-create-2pgwg\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.950180 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9fbj\" (UniqueName: \"kubernetes.io/projected/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-kube-api-access-l9fbj\") pod \"keystone-4038-account-create-update-whf6w\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " 
pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.950224 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-operator-scripts\") pod \"keystone-4038-account-create-update-whf6w\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.951182 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-operator-scripts\") pod \"keystone-4038-account-create-update-whf6w\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.951527 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1273e204-b821-42e0-a481-a0709c74e28a-operator-scripts\") pod \"keystone-db-create-2pgwg\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.969816 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9fbj\" (UniqueName: \"kubernetes.io/projected/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-kube-api-access-l9fbj\") pod \"keystone-4038-account-create-update-whf6w\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:39 crc kubenswrapper[4734]: I1125 09:49:39.971592 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmm65\" (UniqueName: \"kubernetes.io/projected/1273e204-b821-42e0-a481-a0709c74e28a-kube-api-access-pmm65\") pod \"keystone-db-create-2pgwg\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.133478 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.143097 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.274022 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45995a25-2249-4f80-8f19-9dd95343d93b" path="/var/lib/kubelet/pods/45995a25-2249-4f80-8f19-9dd95343d93b/volumes" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.275422 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="724b6630-d284-4fbe-83b9-96cd1634248f" path="/var/lib/kubelet/pods/724b6630-d284-4fbe-83b9-96cd1634248f/volumes" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.276069 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa726887-07d2-4e56-ae70-c21f72b81e34" path="/var/lib/kubelet/pods/aa726887-07d2-4e56-ae70-c21f72b81e34/volumes" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.276908 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d" path="/var/lib/kubelet/pods/cae33b6d-d7d4-4fdb-bb69-4fa5e3a49d6d/volumes" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.569143 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-4038-account-create-update-whf6w"] Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.630658 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-2pgwg"] Nov 25 09:49:40 crc kubenswrapper[4734]: W1125 09:49:40.630985 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1273e204_b821_42e0_a481_a0709c74e28a.slice/crio-f54e2b9102685fbc19cc38e7230a0e53d61bd327b23fc7ac12c57c42a6715b05 WatchSource:0}: Error finding container f54e2b9102685fbc19cc38e7230a0e53d61bd327b23fc7ac12c57c42a6715b05: Status 404 returned error can't find the container with id f54e2b9102685fbc19cc38e7230a0e53d61bd327b23fc7ac12c57c42a6715b05 Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.799898 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" event={"ID":"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7","Type":"ContainerStarted","Data":"5a2b384d11064db35b5b9fada2a649959f493296388f8f3bf76b0c3115779377"} Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.799956 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" event={"ID":"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7","Type":"ContainerStarted","Data":"d9b4c49461041f61bcc4a46dd75224f491604df1b4c4ef33e93d6057ff04ad23"} Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.801707 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" event={"ID":"1273e204-b821-42e0-a481-a0709c74e28a","Type":"ContainerStarted","Data":"97a3b4e9bd33c43ff1cd5a5bc20eea5b4b0df122159b03f723288444ed3dc2f8"} Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.801759 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" event={"ID":"1273e204-b821-42e0-a481-a0709c74e28a","Type":"ContainerStarted","Data":"f54e2b9102685fbc19cc38e7230a0e53d61bd327b23fc7ac12c57c42a6715b05"} Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.816009 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" podStartSLOduration=1.815993127 
podStartE2EDuration="1.815993127s" podCreationTimestamp="2025-11-25 09:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:40.813838415 +0000 UTC m=+1303.624300409" watchObservedRunningTime="2025-11-25 09:49:40.815993127 +0000 UTC m=+1303.626455121" Nov 25 09:49:40 crc kubenswrapper[4734]: I1125 09:49:40.834247 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" podStartSLOduration=1.834230892 podStartE2EDuration="1.834230892s" podCreationTimestamp="2025-11-25 09:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:40.830668399 +0000 UTC m=+1303.641130413" watchObservedRunningTime="2025-11-25 09:49:40.834230892 +0000 UTC m=+1303.644692886" Nov 25 09:49:41 crc kubenswrapper[4734]: I1125 09:49:41.818879 4734 generic.go:334] "Generic (PLEG): container finished" podID="fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" containerID="5a2b384d11064db35b5b9fada2a649959f493296388f8f3bf76b0c3115779377" exitCode=0 Nov 25 09:49:41 crc kubenswrapper[4734]: I1125 09:49:41.819331 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" event={"ID":"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7","Type":"ContainerDied","Data":"5a2b384d11064db35b5b9fada2a649959f493296388f8f3bf76b0c3115779377"} Nov 25 09:49:41 crc kubenswrapper[4734]: I1125 09:49:41.822234 4734 generic.go:334] "Generic (PLEG): container finished" podID="1273e204-b821-42e0-a481-a0709c74e28a" containerID="97a3b4e9bd33c43ff1cd5a5bc20eea5b4b0df122159b03f723288444ed3dc2f8" exitCode=0 Nov 25 09:49:41 crc kubenswrapper[4734]: I1125 09:49:41.822267 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" event={"ID":"1273e204-b821-42e0-a481-a0709c74e28a","Type":"ContainerDied","Data":"97a3b4e9bd33c43ff1cd5a5bc20eea5b4b0df122159b03f723288444ed3dc2f8"} Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.165003 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.171048 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.191801 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmm65\" (UniqueName: \"kubernetes.io/projected/1273e204-b821-42e0-a481-a0709c74e28a-kube-api-access-pmm65\") pod \"1273e204-b821-42e0-a481-a0709c74e28a\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.191864 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1273e204-b821-42e0-a481-a0709c74e28a-operator-scripts\") pod \"1273e204-b821-42e0-a481-a0709c74e28a\" (UID: \"1273e204-b821-42e0-a481-a0709c74e28a\") " Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.192000 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-operator-scripts\") pod \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.192112 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9fbj\" (UniqueName: \"kubernetes.io/projected/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-kube-api-access-l9fbj\") pod \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\" (UID: \"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7\") " Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.193187 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" (UID: "fc40edc0-09a7-4e15-9f87-600a8b3ee0f7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.193637 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1273e204-b821-42e0-a481-a0709c74e28a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1273e204-b821-42e0-a481-a0709c74e28a" (UID: "1273e204-b821-42e0-a481-a0709c74e28a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.198656 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1273e204-b821-42e0-a481-a0709c74e28a-kube-api-access-pmm65" (OuterVolumeSpecName: "kube-api-access-pmm65") pod "1273e204-b821-42e0-a481-a0709c74e28a" (UID: "1273e204-b821-42e0-a481-a0709c74e28a"). InnerVolumeSpecName "kube-api-access-pmm65". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.199409 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-kube-api-access-l9fbj" (OuterVolumeSpecName: "kube-api-access-l9fbj") pod "fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" (UID: "fc40edc0-09a7-4e15-9f87-600a8b3ee0f7"). InnerVolumeSpecName "kube-api-access-l9fbj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.292920 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9fbj\" (UniqueName: \"kubernetes.io/projected/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-kube-api-access-l9fbj\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.292959 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmm65\" (UniqueName: \"kubernetes.io/projected/1273e204-b821-42e0-a481-a0709c74e28a-kube-api-access-pmm65\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.292988 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1273e204-b821-42e0-a481-a0709c74e28a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.292998 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.837691 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" event={"ID":"1273e204-b821-42e0-a481-a0709c74e28a","Type":"ContainerDied","Data":"f54e2b9102685fbc19cc38e7230a0e53d61bd327b23fc7ac12c57c42a6715b05"} Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.837739 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f54e2b9102685fbc19cc38e7230a0e53d61bd327b23fc7ac12c57c42a6715b05" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.837805 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-2pgwg" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.840150 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" event={"ID":"fc40edc0-09a7-4e15-9f87-600a8b3ee0f7","Type":"ContainerDied","Data":"d9b4c49461041f61bcc4a46dd75224f491604df1b4c4ef33e93d6057ff04ad23"} Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.840181 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-4038-account-create-update-whf6w" Nov 25 09:49:43 crc kubenswrapper[4734]: I1125 09:49:43.840196 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9b4c49461041f61bcc4a46dd75224f491604df1b4c4ef33e93d6057ff04ad23" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.462727 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-dp588"] Nov 25 09:49:45 crc kubenswrapper[4734]: E1125 09:49:45.463231 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1273e204-b821-42e0-a481-a0709c74e28a" containerName="mariadb-database-create" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.463244 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="1273e204-b821-42e0-a481-a0709c74e28a" containerName="mariadb-database-create" Nov 25 09:49:45 crc kubenswrapper[4734]: E1125 09:49:45.463259 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" containerName="mariadb-account-create-update" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.463265 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" containerName="mariadb-account-create-update" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.463393 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="1273e204-b821-42e0-a481-a0709c74e28a" containerName="mariadb-database-create" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.463409 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" containerName="mariadb-account-create-update" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.463810 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.465303 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.465337 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.466355 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-w4knt" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.471164 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.481148 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-dp588"] Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.621290 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z585l\" (UniqueName: \"kubernetes.io/projected/815ea782-38dd-48b9-811c-8345cf61a251-kube-api-access-z585l\") pod \"keystone-db-sync-dp588\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.621340 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/815ea782-38dd-48b9-811c-8345cf61a251-config-data\") pod \"keystone-db-sync-dp588\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.722598 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z585l\" (UniqueName: \"kubernetes.io/projected/815ea782-38dd-48b9-811c-8345cf61a251-kube-api-access-z585l\") pod \"keystone-db-sync-dp588\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.722677 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/815ea782-38dd-48b9-811c-8345cf61a251-config-data\") pod \"keystone-db-sync-dp588\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.726973 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/815ea782-38dd-48b9-811c-8345cf61a251-config-data\") pod \"keystone-db-sync-dp588\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.740762 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z585l\" (UniqueName: \"kubernetes.io/projected/815ea782-38dd-48b9-811c-8345cf61a251-kube-api-access-z585l\") pod \"keystone-db-sync-dp588\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.779603 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:45 crc kubenswrapper[4734]: I1125 09:49:45.972324 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-dp588"] Nov 25 09:49:46 crc kubenswrapper[4734]: I1125 09:49:46.864679 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-dp588" event={"ID":"815ea782-38dd-48b9-811c-8345cf61a251","Type":"ContainerStarted","Data":"bfd93691b8978dd518e7dbc577103b8c0996ae2578be11c554a8270abacf957e"} Nov 25 09:49:46 crc kubenswrapper[4734]: I1125 09:49:46.864996 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-dp588" event={"ID":"815ea782-38dd-48b9-811c-8345cf61a251","Type":"ContainerStarted","Data":"373070e06f8f9cd086d4962721804730ce8e86a34e2a73eeb6c92d458e71918b"} Nov 25 09:49:46 crc kubenswrapper[4734]: I1125 09:49:46.882835 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-sync-dp588" podStartSLOduration=1.882818061 podStartE2EDuration="1.882818061s" podCreationTimestamp="2025-11-25 09:49:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:46.880861025 +0000 UTC m=+1309.691323019" watchObservedRunningTime="2025-11-25 09:49:46.882818061 +0000 UTC m=+1309.693280055" Nov 25 09:49:47 crc kubenswrapper[4734]: I1125 09:49:47.872432 4734 generic.go:334] "Generic (PLEG): container finished" podID="815ea782-38dd-48b9-811c-8345cf61a251" containerID="bfd93691b8978dd518e7dbc577103b8c0996ae2578be11c554a8270abacf957e" exitCode=0 Nov 25 09:49:47 crc kubenswrapper[4734]: I1125 09:49:47.872489 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-dp588" event={"ID":"815ea782-38dd-48b9-811c-8345cf61a251","Type":"ContainerDied","Data":"bfd93691b8978dd518e7dbc577103b8c0996ae2578be11c554a8270abacf957e"} Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.151032 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.293981 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/815ea782-38dd-48b9-811c-8345cf61a251-config-data\") pod \"815ea782-38dd-48b9-811c-8345cf61a251\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.294041 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z585l\" (UniqueName: \"kubernetes.io/projected/815ea782-38dd-48b9-811c-8345cf61a251-kube-api-access-z585l\") pod \"815ea782-38dd-48b9-811c-8345cf61a251\" (UID: \"815ea782-38dd-48b9-811c-8345cf61a251\") " Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.301392 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/815ea782-38dd-48b9-811c-8345cf61a251-kube-api-access-z585l" (OuterVolumeSpecName: "kube-api-access-z585l") pod "815ea782-38dd-48b9-811c-8345cf61a251" (UID: "815ea782-38dd-48b9-811c-8345cf61a251"). InnerVolumeSpecName "kube-api-access-z585l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.326004 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/815ea782-38dd-48b9-811c-8345cf61a251-config-data" (OuterVolumeSpecName: "config-data") pod "815ea782-38dd-48b9-811c-8345cf61a251" (UID: "815ea782-38dd-48b9-811c-8345cf61a251"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.395931 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/815ea782-38dd-48b9-811c-8345cf61a251-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.395976 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z585l\" (UniqueName: \"kubernetes.io/projected/815ea782-38dd-48b9-811c-8345cf61a251-kube-api-access-z585l\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.889505 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-dp588" event={"ID":"815ea782-38dd-48b9-811c-8345cf61a251","Type":"ContainerDied","Data":"373070e06f8f9cd086d4962721804730ce8e86a34e2a73eeb6c92d458e71918b"} Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.889556 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="373070e06f8f9cd086d4962721804730ce8e86a34e2a73eeb6c92d458e71918b" Nov 25 09:49:49 crc kubenswrapper[4734]: I1125 09:49:49.889629 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-dp588" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.089988 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-trl8m"] Nov 25 09:49:50 crc kubenswrapper[4734]: E1125 09:49:50.090333 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="815ea782-38dd-48b9-811c-8345cf61a251" containerName="keystone-db-sync" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.090357 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="815ea782-38dd-48b9-811c-8345cf61a251" containerName="keystone-db-sync" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.090516 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="815ea782-38dd-48b9-811c-8345cf61a251" containerName="keystone-db-sync" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.091021 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.094484 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.094631 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"osp-secret" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.094707 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.094736 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-w4knt" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.095123 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.104742 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsdkj\" (UniqueName: \"kubernetes.io/projected/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-kube-api-access-zsdkj\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.104843 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-fernet-keys\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.104929 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-config-data\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.104981 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-credential-keys\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.105052 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-scripts\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.107249 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-trl8m"] Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.205908 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-config-data\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc 
kubenswrapper[4734]: I1125 09:49:50.205963 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-credential-keys\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.206006 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-scripts\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.206044 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsdkj\" (UniqueName: \"kubernetes.io/projected/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-kube-api-access-zsdkj\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.206099 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-fernet-keys\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.209569 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-credential-keys\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.210365 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-config-data\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.217790 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-fernet-keys\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.222641 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsdkj\" (UniqueName: \"kubernetes.io/projected/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-kube-api-access-zsdkj\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.235507 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-scripts\") pod \"keystone-bootstrap-trl8m\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.408753 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.603718 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-trl8m"] Nov 25 09:49:50 crc kubenswrapper[4734]: W1125 09:49:50.608877 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7a4a2fe_a6f6_4104_bd14_eb78a776f227.slice/crio-7acb339de2529ef3e97e2a0c1eb300adc713d4f2791e291dc968cda181d4c6dd WatchSource:0}: Error finding container 7acb339de2529ef3e97e2a0c1eb300adc713d4f2791e291dc968cda181d4c6dd: Status 404 returned error can't find the container with id 7acb339de2529ef3e97e2a0c1eb300adc713d4f2791e291dc968cda181d4c6dd Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.898057 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" event={"ID":"e7a4a2fe-a6f6-4104-bd14-eb78a776f227","Type":"ContainerStarted","Data":"4aad815f45c5844f21d839588a7ea82c52a0606e98855e801a6f3e77906d2164"} Nov 25 09:49:50 crc kubenswrapper[4734]: I1125 09:49:50.898420 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" event={"ID":"e7a4a2fe-a6f6-4104-bd14-eb78a776f227","Type":"ContainerStarted","Data":"7acb339de2529ef3e97e2a0c1eb300adc713d4f2791e291dc968cda181d4c6dd"} Nov 25 09:49:53 crc kubenswrapper[4734]: I1125 09:49:53.921736 4734 generic.go:334] "Generic (PLEG): container finished" podID="e7a4a2fe-a6f6-4104-bd14-eb78a776f227" containerID="4aad815f45c5844f21d839588a7ea82c52a0606e98855e801a6f3e77906d2164" exitCode=0 Nov 25 09:49:53 crc kubenswrapper[4734]: I1125 09:49:53.921999 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" event={"ID":"e7a4a2fe-a6f6-4104-bd14-eb78a776f227","Type":"ContainerDied","Data":"4aad815f45c5844f21d839588a7ea82c52a0606e98855e801a6f3e77906d2164"} Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.201720 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.386329 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-fernet-keys\") pod \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.386416 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsdkj\" (UniqueName: \"kubernetes.io/projected/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-kube-api-access-zsdkj\") pod \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.386474 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-scripts\") pod \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.386549 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-config-data\") pod \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.386628 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-credential-keys\") pod \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\" (UID: \"e7a4a2fe-a6f6-4104-bd14-eb78a776f227\") " Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.392915 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e7a4a2fe-a6f6-4104-bd14-eb78a776f227" (UID: "e7a4a2fe-a6f6-4104-bd14-eb78a776f227"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.393184 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e7a4a2fe-a6f6-4104-bd14-eb78a776f227" (UID: "e7a4a2fe-a6f6-4104-bd14-eb78a776f227"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.399875 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-scripts" (OuterVolumeSpecName: "scripts") pod "e7a4a2fe-a6f6-4104-bd14-eb78a776f227" (UID: "e7a4a2fe-a6f6-4104-bd14-eb78a776f227"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.399878 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-kube-api-access-zsdkj" (OuterVolumeSpecName: "kube-api-access-zsdkj") pod "e7a4a2fe-a6f6-4104-bd14-eb78a776f227" (UID: "e7a4a2fe-a6f6-4104-bd14-eb78a776f227"). InnerVolumeSpecName "kube-api-access-zsdkj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.409823 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-config-data" (OuterVolumeSpecName: "config-data") pod "e7a4a2fe-a6f6-4104-bd14-eb78a776f227" (UID: "e7a4a2fe-a6f6-4104-bd14-eb78a776f227"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.488233 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.488273 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsdkj\" (UniqueName: \"kubernetes.io/projected/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-kube-api-access-zsdkj\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.488287 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.488296 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.488305 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e7a4a2fe-a6f6-4104-bd14-eb78a776f227-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.939273 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" event={"ID":"e7a4a2fe-a6f6-4104-bd14-eb78a776f227","Type":"ContainerDied","Data":"7acb339de2529ef3e97e2a0c1eb300adc713d4f2791e291dc968cda181d4c6dd"} Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.939329 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7acb339de2529ef3e97e2a0c1eb300adc713d4f2791e291dc968cda181d4c6dd" Nov 25 09:49:55 crc kubenswrapper[4734]: I1125 09:49:55.939353 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-trl8m" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.021833 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-657fb656c4-swl45"] Nov 25 09:49:56 crc kubenswrapper[4734]: E1125 09:49:56.022057 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7a4a2fe-a6f6-4104-bd14-eb78a776f227" containerName="keystone-bootstrap" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.022068 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7a4a2fe-a6f6-4104-bd14-eb78a776f227" containerName="keystone-bootstrap" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.022193 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7a4a2fe-a6f6-4104-bd14-eb78a776f227" containerName="keystone-bootstrap" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.022644 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.025073 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.025180 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-w4knt" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.027426 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.027459 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.032391 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-657fb656c4-swl45"] Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.096577 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-scripts\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.096688 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-fernet-keys\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.096723 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl5xx\" (UniqueName: \"kubernetes.io/projected/9b86c553-9e17-467e-bab0-f6d841797325-kube-api-access-bl5xx\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.096815 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-credential-keys\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.096839 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-config-data\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.198254 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-credential-keys\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.198373 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-config-data\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.198895 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-scripts\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.198973 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-fernet-keys\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.199014 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl5xx\" (UniqueName: \"kubernetes.io/projected/9b86c553-9e17-467e-bab0-f6d841797325-kube-api-access-bl5xx\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.202499 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-scripts\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.202523 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-credential-keys\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.202736 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-config-data\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.203331 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-fernet-keys\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.216897 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl5xx\" (UniqueName: \"kubernetes.io/projected/9b86c553-9e17-467e-bab0-f6d841797325-kube-api-access-bl5xx\") pod \"keystone-657fb656c4-swl45\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:56 crc kubenswrapper[4734]: I1125 09:49:56.339033 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:57 crc kubenswrapper[4734]: I1125 09:49:57.236222 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-657fb656c4-swl45"] Nov 25 09:49:57 crc kubenswrapper[4734]: W1125 09:49:57.245304 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b86c553_9e17_467e_bab0_f6d841797325.slice/crio-8df2ad904038e3027f29e15af30d97e338af0e52de29005054fa5df6b8d732b3 WatchSource:0}: Error finding container 8df2ad904038e3027f29e15af30d97e338af0e52de29005054fa5df6b8d732b3: Status 404 returned error can't find the container with id 8df2ad904038e3027f29e15af30d97e338af0e52de29005054fa5df6b8d732b3 Nov 25 09:49:57 crc kubenswrapper[4734]: I1125 09:49:57.952846 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" event={"ID":"9b86c553-9e17-467e-bab0-f6d841797325","Type":"ContainerStarted","Data":"b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36"} Nov 25 09:49:57 crc kubenswrapper[4734]: I1125 09:49:57.953192 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" event={"ID":"9b86c553-9e17-467e-bab0-f6d841797325","Type":"ContainerStarted","Data":"8df2ad904038e3027f29e15af30d97e338af0e52de29005054fa5df6b8d732b3"} Nov 25 09:49:57 crc kubenswrapper[4734]: I1125 09:49:57.953212 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:49:57 crc kubenswrapper[4734]: I1125 09:49:57.971242 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" podStartSLOduration=1.971221501 podStartE2EDuration="1.971221501s" podCreationTimestamp="2025-11-25 09:49:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:57.965708422 +0000 UTC m=+1320.776170436" watchObservedRunningTime="2025-11-25 09:49:57.971221501 +0000 UTC m=+1320.781683495" Nov 25 09:50:20 crc kubenswrapper[4734]: I1125 09:50:20.696238 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:50:20 crc kubenswrapper[4734]: I1125 09:50:20.696701 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:50:27 crc kubenswrapper[4734]: I1125 09:50:27.779260 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.325655 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-trl8m"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.331356 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-trl8m"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.336872 4734 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-dp588"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.343829 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-dp588"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.352366 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-657fb656c4-swl45"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.352630 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" podUID="9b86c553-9e17-467e-bab0-f6d841797325" containerName="keystone-api" containerID="cri-o://b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36" gracePeriod=30 Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.371077 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone4038-account-delete-kjl8t"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.371807 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.386197 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone4038-account-delete-kjl8t"] Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.488130 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/917dd005-c5fd-426f-b78f-c41fff0fdf4e-operator-scripts\") pod \"keystone4038-account-delete-kjl8t\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.488420 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slwpz\" (UniqueName: \"kubernetes.io/projected/917dd005-c5fd-426f-b78f-c41fff0fdf4e-kube-api-access-slwpz\") pod \"keystone4038-account-delete-kjl8t\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.589272 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slwpz\" (UniqueName: \"kubernetes.io/projected/917dd005-c5fd-426f-b78f-c41fff0fdf4e-kube-api-access-slwpz\") pod \"keystone4038-account-delete-kjl8t\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.589389 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/917dd005-c5fd-426f-b78f-c41fff0fdf4e-operator-scripts\") pod \"keystone4038-account-delete-kjl8t\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.590161 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/917dd005-c5fd-426f-b78f-c41fff0fdf4e-operator-scripts\") pod \"keystone4038-account-delete-kjl8t\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.609516 4734 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slwpz\" (UniqueName: \"kubernetes.io/projected/917dd005-c5fd-426f-b78f-c41fff0fdf4e-kube-api-access-slwpz\") pod \"keystone4038-account-delete-kjl8t\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.692131 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:44 crc kubenswrapper[4734]: I1125 09:50:44.901798 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone4038-account-delete-kjl8t"] Nov 25 09:50:45 crc kubenswrapper[4734]: I1125 09:50:45.311974 4734 generic.go:334] "Generic (PLEG): container finished" podID="917dd005-c5fd-426f-b78f-c41fff0fdf4e" containerID="58a58cfaa371de7b838b71ea899286d65364ca3c38ef5f2b7a0091b213e60c72" exitCode=0 Nov 25 09:50:45 crc kubenswrapper[4734]: I1125 09:50:45.312030 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" event={"ID":"917dd005-c5fd-426f-b78f-c41fff0fdf4e","Type":"ContainerDied","Data":"58a58cfaa371de7b838b71ea899286d65364ca3c38ef5f2b7a0091b213e60c72"} Nov 25 09:50:45 crc kubenswrapper[4734]: I1125 09:50:45.312616 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" event={"ID":"917dd005-c5fd-426f-b78f-c41fff0fdf4e","Type":"ContainerStarted","Data":"cbb3783f3ff8e7334a88262458dffddf09ff79a96ed5482c71bf04ba80029290"} Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.257014 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="815ea782-38dd-48b9-811c-8345cf61a251" path="/var/lib/kubelet/pods/815ea782-38dd-48b9-811c-8345cf61a251/volumes" Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.257612 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7a4a2fe-a6f6-4104-bd14-eb78a776f227" path="/var/lib/kubelet/pods/e7a4a2fe-a6f6-4104-bd14-eb78a776f227/volumes" Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.584371 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.728780 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slwpz\" (UniqueName: \"kubernetes.io/projected/917dd005-c5fd-426f-b78f-c41fff0fdf4e-kube-api-access-slwpz\") pod \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.728877 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/917dd005-c5fd-426f-b78f-c41fff0fdf4e-operator-scripts\") pod \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\" (UID: \"917dd005-c5fd-426f-b78f-c41fff0fdf4e\") " Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.729558 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917dd005-c5fd-426f-b78f-c41fff0fdf4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "917dd005-c5fd-426f-b78f-c41fff0fdf4e" (UID: "917dd005-c5fd-426f-b78f-c41fff0fdf4e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.734435 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917dd005-c5fd-426f-b78f-c41fff0fdf4e-kube-api-access-slwpz" (OuterVolumeSpecName: "kube-api-access-slwpz") pod "917dd005-c5fd-426f-b78f-c41fff0fdf4e" (UID: "917dd005-c5fd-426f-b78f-c41fff0fdf4e"). InnerVolumeSpecName "kube-api-access-slwpz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.830905 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slwpz\" (UniqueName: \"kubernetes.io/projected/917dd005-c5fd-426f-b78f-c41fff0fdf4e-kube-api-access-slwpz\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:46 crc kubenswrapper[4734]: I1125 09:50:46.830944 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/917dd005-c5fd-426f-b78f-c41fff0fdf4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:47 crc kubenswrapper[4734]: I1125 09:50:47.327245 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" event={"ID":"917dd005-c5fd-426f-b78f-c41fff0fdf4e","Type":"ContainerDied","Data":"cbb3783f3ff8e7334a88262458dffddf09ff79a96ed5482c71bf04ba80029290"} Nov 25 09:50:47 crc kubenswrapper[4734]: I1125 09:50:47.327544 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbb3783f3ff8e7334a88262458dffddf09ff79a96ed5482c71bf04ba80029290" Nov 25 09:50:47 crc kubenswrapper[4734]: I1125 09:50:47.327279 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone4038-account-delete-kjl8t" Nov 25 09:50:47 crc kubenswrapper[4734]: I1125 09:50:47.870840 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.047661 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl5xx\" (UniqueName: \"kubernetes.io/projected/9b86c553-9e17-467e-bab0-f6d841797325-kube-api-access-bl5xx\") pod \"9b86c553-9e17-467e-bab0-f6d841797325\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.047713 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-credential-keys\") pod \"9b86c553-9e17-467e-bab0-f6d841797325\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.047749 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-config-data\") pod \"9b86c553-9e17-467e-bab0-f6d841797325\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.048505 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-scripts\") pod \"9b86c553-9e17-467e-bab0-f6d841797325\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.048579 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-fernet-keys\") pod \"9b86c553-9e17-467e-bab0-f6d841797325\" (UID: \"9b86c553-9e17-467e-bab0-f6d841797325\") " Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.053179 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-scripts" (OuterVolumeSpecName: "scripts") pod "9b86c553-9e17-467e-bab0-f6d841797325" (UID: "9b86c553-9e17-467e-bab0-f6d841797325"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.053448 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "9b86c553-9e17-467e-bab0-f6d841797325" (UID: "9b86c553-9e17-467e-bab0-f6d841797325"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.053582 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b86c553-9e17-467e-bab0-f6d841797325-kube-api-access-bl5xx" (OuterVolumeSpecName: "kube-api-access-bl5xx") pod "9b86c553-9e17-467e-bab0-f6d841797325" (UID: "9b86c553-9e17-467e-bab0-f6d841797325"). InnerVolumeSpecName "kube-api-access-bl5xx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.057270 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "9b86c553-9e17-467e-bab0-f6d841797325" (UID: "9b86c553-9e17-467e-bab0-f6d841797325"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.065660 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-config-data" (OuterVolumeSpecName: "config-data") pod "9b86c553-9e17-467e-bab0-f6d841797325" (UID: "9b86c553-9e17-467e-bab0-f6d841797325"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.150887 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.150982 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl5xx\" (UniqueName: \"kubernetes.io/projected/9b86c553-9e17-467e-bab0-f6d841797325-kube-api-access-bl5xx\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.150998 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.151015 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.151027 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b86c553-9e17-467e-bab0-f6d841797325-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.333883 4734 generic.go:334] "Generic (PLEG): container finished" podID="9b86c553-9e17-467e-bab0-f6d841797325" containerID="b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36" exitCode=0 Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.333924 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.333942 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" event={"ID":"9b86c553-9e17-467e-bab0-f6d841797325","Type":"ContainerDied","Data":"b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36"} Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.334021 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-657fb656c4-swl45" event={"ID":"9b86c553-9e17-467e-bab0-f6d841797325","Type":"ContainerDied","Data":"8df2ad904038e3027f29e15af30d97e338af0e52de29005054fa5df6b8d732b3"} Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.334074 4734 scope.go:117] "RemoveContainer" containerID="b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.355989 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-657fb656c4-swl45"] Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.360865 4734 scope.go:117] "RemoveContainer" containerID="b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36" Nov 25 09:50:48 crc kubenswrapper[4734]: E1125 09:50:48.361271 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36\": container with ID starting with b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36 not found: ID does not exist" containerID="b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.361339 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36"} err="failed to get container status \"b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36\": rpc error: code = NotFound desc = could not find container \"b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36\": container with ID starting with b430e7e1552e1a1a934dd3d46b0b28e5ad4065a420df4eb520cb41daa9a37c36 not found: ID does not exist" Nov 25 09:50:48 crc kubenswrapper[4734]: I1125 09:50:48.361446 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-657fb656c4-swl45"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.387873 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-2pgwg"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.391647 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-2pgwg"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.395263 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone4038-account-delete-kjl8t"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.398698 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone4038-account-delete-kjl8t"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.402231 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-4038-account-create-update-whf6w"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.405883 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["keystone-kuttl-tests/keystone-4038-account-create-update-whf6w"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.482027 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkh4p"] Nov 25 09:50:49 crc kubenswrapper[4734]: E1125 09:50:49.482333 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917dd005-c5fd-426f-b78f-c41fff0fdf4e" containerName="mariadb-account-delete" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.482352 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="917dd005-c5fd-426f-b78f-c41fff0fdf4e" containerName="mariadb-account-delete" Nov 25 09:50:49 crc kubenswrapper[4734]: E1125 09:50:49.482377 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b86c553-9e17-467e-bab0-f6d841797325" containerName="keystone-api" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.482384 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b86c553-9e17-467e-bab0-f6d841797325" containerName="keystone-api" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.482520 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="917dd005-c5fd-426f-b78f-c41fff0fdf4e" containerName="mariadb-account-delete" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.482544 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b86c553-9e17-467e-bab0-f6d841797325" containerName="keystone-api" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.483077 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.487620 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkh4p"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.589572 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.591065 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.592829 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-db-secret" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.597057 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j"] Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.603745 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8e99e93-5f83-4568-a186-f712828e67b7-operator-scripts\") pod \"keystone-db-create-mkh4p\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.603897 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4gs4\" (UniqueName: \"kubernetes.io/projected/e8e99e93-5f83-4568-a186-f712828e67b7-kube-api-access-n4gs4\") pod \"keystone-db-create-mkh4p\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.705218 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4gs4\" (UniqueName: \"kubernetes.io/projected/e8e99e93-5f83-4568-a186-f712828e67b7-kube-api-access-n4gs4\") pod \"keystone-db-create-mkh4p\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.705306 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-operator-scripts\") pod \"keystone-77ea-account-create-update-46t5j\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.705366 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8e99e93-5f83-4568-a186-f712828e67b7-operator-scripts\") pod \"keystone-db-create-mkh4p\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.705444 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t5lb\" (UniqueName: \"kubernetes.io/projected/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-kube-api-access-6t5lb\") pod \"keystone-77ea-account-create-update-46t5j\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.706973 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8e99e93-5f83-4568-a186-f712828e67b7-operator-scripts\") pod \"keystone-db-create-mkh4p\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.724838 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-n4gs4\" (UniqueName: \"kubernetes.io/projected/e8e99e93-5f83-4568-a186-f712828e67b7-kube-api-access-n4gs4\") pod \"keystone-db-create-mkh4p\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.795908 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.806995 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-operator-scripts\") pod \"keystone-77ea-account-create-update-46t5j\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.807134 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t5lb\" (UniqueName: \"kubernetes.io/projected/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-kube-api-access-6t5lb\") pod \"keystone-77ea-account-create-update-46t5j\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.807833 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-operator-scripts\") pod \"keystone-77ea-account-create-update-46t5j\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.832902 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t5lb\" (UniqueName: \"kubernetes.io/projected/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-kube-api-access-6t5lb\") pod \"keystone-77ea-account-create-update-46t5j\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:49 crc kubenswrapper[4734]: I1125 09:50:49.908579 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.212520 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkh4p"] Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.260461 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1273e204-b821-42e0-a481-a0709c74e28a" path="/var/lib/kubelet/pods/1273e204-b821-42e0-a481-a0709c74e28a/volumes" Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.262213 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="917dd005-c5fd-426f-b78f-c41fff0fdf4e" path="/var/lib/kubelet/pods/917dd005-c5fd-426f-b78f-c41fff0fdf4e/volumes" Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.265341 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b86c553-9e17-467e-bab0-f6d841797325" path="/var/lib/kubelet/pods/9b86c553-9e17-467e-bab0-f6d841797325/volumes" Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.265987 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc40edc0-09a7-4e15-9f87-600a8b3ee0f7" path="/var/lib/kubelet/pods/fc40edc0-09a7-4e15-9f87-600a8b3ee0f7/volumes" Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.313239 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j"] Nov 25 09:50:50 crc kubenswrapper[4734]: W1125 09:50:50.319047 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dcdddcc_8bac_4aae_acfa_8f10f49b98d4.slice/crio-22581b66c09f840963b66ffec84d7b49655ff5ef5924ca57929670f5d72e6829 WatchSource:0}: Error finding container 22581b66c09f840963b66ffec84d7b49655ff5ef5924ca57929670f5d72e6829: Status 404 returned error can't find the container with id 22581b66c09f840963b66ffec84d7b49655ff5ef5924ca57929670f5d72e6829 Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.350343 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" event={"ID":"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4","Type":"ContainerStarted","Data":"22581b66c09f840963b66ffec84d7b49655ff5ef5924ca57929670f5d72e6829"} Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.352209 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" event={"ID":"e8e99e93-5f83-4568-a186-f712828e67b7","Type":"ContainerStarted","Data":"70f7322628c1c6d5faa465a63540841e46a1ce5882317e9259c0ae2afec308c1"} Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.375316 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" podStartSLOduration=1.375285076 podStartE2EDuration="1.375285076s" podCreationTimestamp="2025-11-25 09:50:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:50:50.362912139 +0000 UTC m=+1373.173374153" watchObservedRunningTime="2025-11-25 09:50:50.375285076 +0000 UTC m=+1373.185747070" Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.695865 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 25 09:50:50 crc kubenswrapper[4734]: I1125 09:50:50.695927 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:50:51 crc kubenswrapper[4734]: I1125 09:50:51.361936 4734 generic.go:334] "Generic (PLEG): container finished" podID="e8e99e93-5f83-4568-a186-f712828e67b7" containerID="6234346b37ff6e67d0a506db2a89361b6c50d855bf5d59781f9b93f83d6073a7" exitCode=0 Nov 25 09:50:51 crc kubenswrapper[4734]: I1125 09:50:51.362042 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" event={"ID":"e8e99e93-5f83-4568-a186-f712828e67b7","Type":"ContainerDied","Data":"6234346b37ff6e67d0a506db2a89361b6c50d855bf5d59781f9b93f83d6073a7"} Nov 25 09:50:51 crc kubenswrapper[4734]: I1125 09:50:51.364263 4734 generic.go:334] "Generic (PLEG): container finished" podID="5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" containerID="b5449337a056801c1c4a5baf1473365cf8f4363b8b63ac465c4c86a992f58728" exitCode=0 Nov 25 09:50:51 crc kubenswrapper[4734]: I1125 09:50:51.364331 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" event={"ID":"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4","Type":"ContainerDied","Data":"b5449337a056801c1c4a5baf1473365cf8f4363b8b63ac465c4c86a992f58728"} Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.694293 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.701383 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.848691 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-operator-scripts\") pod \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.848818 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8e99e93-5f83-4568-a186-f712828e67b7-operator-scripts\") pod \"e8e99e93-5f83-4568-a186-f712828e67b7\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.848884 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4gs4\" (UniqueName: \"kubernetes.io/projected/e8e99e93-5f83-4568-a186-f712828e67b7-kube-api-access-n4gs4\") pod \"e8e99e93-5f83-4568-a186-f712828e67b7\" (UID: \"e8e99e93-5f83-4568-a186-f712828e67b7\") " Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.849003 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6t5lb\" (UniqueName: \"kubernetes.io/projected/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-kube-api-access-6t5lb\") pod \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\" (UID: \"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4\") " Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.849469 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" (UID: "5dcdddcc-8bac-4aae-acfa-8f10f49b98d4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.850158 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8e99e93-5f83-4568-a186-f712828e67b7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8e99e93-5f83-4568-a186-f712828e67b7" (UID: "e8e99e93-5f83-4568-a186-f712828e67b7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.854013 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8e99e93-5f83-4568-a186-f712828e67b7-kube-api-access-n4gs4" (OuterVolumeSpecName: "kube-api-access-n4gs4") pod "e8e99e93-5f83-4568-a186-f712828e67b7" (UID: "e8e99e93-5f83-4568-a186-f712828e67b7"). InnerVolumeSpecName "kube-api-access-n4gs4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.854507 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-kube-api-access-6t5lb" (OuterVolumeSpecName: "kube-api-access-6t5lb") pod "5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" (UID: "5dcdddcc-8bac-4aae-acfa-8f10f49b98d4"). InnerVolumeSpecName "kube-api-access-6t5lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.950531 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6t5lb\" (UniqueName: \"kubernetes.io/projected/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-kube-api-access-6t5lb\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.950584 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.950598 4734 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8e99e93-5f83-4568-a186-f712828e67b7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:52 crc kubenswrapper[4734]: I1125 09:50:52.950612 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4gs4\" (UniqueName: \"kubernetes.io/projected/e8e99e93-5f83-4568-a186-f712828e67b7-kube-api-access-n4gs4\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:53 crc kubenswrapper[4734]: I1125 09:50:53.380491 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" Nov 25 09:50:53 crc kubenswrapper[4734]: I1125 09:50:53.380485 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j" event={"ID":"5dcdddcc-8bac-4aae-acfa-8f10f49b98d4","Type":"ContainerDied","Data":"22581b66c09f840963b66ffec84d7b49655ff5ef5924ca57929670f5d72e6829"} Nov 25 09:50:53 crc kubenswrapper[4734]: I1125 09:50:53.380958 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22581b66c09f840963b66ffec84d7b49655ff5ef5924ca57929670f5d72e6829" Nov 25 09:50:53 crc kubenswrapper[4734]: I1125 09:50:53.382644 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" event={"ID":"e8e99e93-5f83-4568-a186-f712828e67b7","Type":"ContainerDied","Data":"70f7322628c1c6d5faa465a63540841e46a1ce5882317e9259c0ae2afec308c1"} Nov 25 09:50:53 crc kubenswrapper[4734]: I1125 09:50:53.382669 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70f7322628c1c6d5faa465a63540841e46a1ce5882317e9259c0ae2afec308c1" Nov 25 09:50:53 crc kubenswrapper[4734]: I1125 09:50:53.382675 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-create-mkh4p" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.162615 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-fvjv8"] Nov 25 09:50:55 crc kubenswrapper[4734]: E1125 09:50:55.163075 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8e99e93-5f83-4568-a186-f712828e67b7" containerName="mariadb-database-create" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.163113 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8e99e93-5f83-4568-a186-f712828e67b7" containerName="mariadb-database-create" Nov 25 09:50:55 crc kubenswrapper[4734]: E1125 09:50:55.163135 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" containerName="mariadb-account-create-update" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.163145 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" containerName="mariadb-account-create-update" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.163310 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" containerName="mariadb-account-create-update" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.163341 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8e99e93-5f83-4568-a186-f712828e67b7" containerName="mariadb-database-create" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.164150 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.166211 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.166409 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-4ws7h" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.166670 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.168717 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.170796 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-fvjv8"] Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.187532 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be5a607-1a91-43aa-953f-bb0950d5a9c2-config-data\") pod \"keystone-db-sync-fvjv8\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.187659 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmlgb\" (UniqueName: \"kubernetes.io/projected/9be5a607-1a91-43aa-953f-bb0950d5a9c2-kube-api-access-mmlgb\") pod \"keystone-db-sync-fvjv8\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.289171 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/9be5a607-1a91-43aa-953f-bb0950d5a9c2-config-data\") pod \"keystone-db-sync-fvjv8\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.289446 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmlgb\" (UniqueName: \"kubernetes.io/projected/9be5a607-1a91-43aa-953f-bb0950d5a9c2-kube-api-access-mmlgb\") pod \"keystone-db-sync-fvjv8\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.296649 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be5a607-1a91-43aa-953f-bb0950d5a9c2-config-data\") pod \"keystone-db-sync-fvjv8\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.304573 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmlgb\" (UniqueName: \"kubernetes.io/projected/9be5a607-1a91-43aa-953f-bb0950d5a9c2-kube-api-access-mmlgb\") pod \"keystone-db-sync-fvjv8\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.480166 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:55 crc kubenswrapper[4734]: I1125 09:50:55.904299 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-fvjv8"] Nov 25 09:50:55 crc kubenswrapper[4734]: W1125 09:50:55.911696 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9be5a607_1a91_43aa_953f_bb0950d5a9c2.slice/crio-53907829df5bf8e88fcbe69ab6f3e17ab41d460845e40376448ab0ac10de4356 WatchSource:0}: Error finding container 53907829df5bf8e88fcbe69ab6f3e17ab41d460845e40376448ab0ac10de4356: Status 404 returned error can't find the container with id 53907829df5bf8e88fcbe69ab6f3e17ab41d460845e40376448ab0ac10de4356 Nov 25 09:50:56 crc kubenswrapper[4734]: I1125 09:50:56.401355 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" event={"ID":"9be5a607-1a91-43aa-953f-bb0950d5a9c2","Type":"ContainerStarted","Data":"daa34881bab8b0efc2e1c69491aa5760bf9ca150c399fc27584bdd3a9e21c50f"} Nov 25 09:50:56 crc kubenswrapper[4734]: I1125 09:50:56.401653 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" event={"ID":"9be5a607-1a91-43aa-953f-bb0950d5a9c2","Type":"ContainerStarted","Data":"53907829df5bf8e88fcbe69ab6f3e17ab41d460845e40376448ab0ac10de4356"} Nov 25 09:50:56 crc kubenswrapper[4734]: I1125 09:50:56.418597 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" podStartSLOduration=1.418578016 podStartE2EDuration="1.418578016s" podCreationTimestamp="2025-11-25 09:50:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:50:56.417441013 +0000 UTC m=+1379.227903007" watchObservedRunningTime="2025-11-25 09:50:56.418578016 +0000 UTC m=+1379.229040010" Nov 25 09:50:58 crc kubenswrapper[4734]: I1125 
09:50:58.418873 4734 generic.go:334] "Generic (PLEG): container finished" podID="9be5a607-1a91-43aa-953f-bb0950d5a9c2" containerID="daa34881bab8b0efc2e1c69491aa5760bf9ca150c399fc27584bdd3a9e21c50f" exitCode=0 Nov 25 09:50:58 crc kubenswrapper[4734]: I1125 09:50:58.418927 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" event={"ID":"9be5a607-1a91-43aa-953f-bb0950d5a9c2","Type":"ContainerDied","Data":"daa34881bab8b0efc2e1c69491aa5760bf9ca150c399fc27584bdd3a9e21c50f"} Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.722724 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.850384 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmlgb\" (UniqueName: \"kubernetes.io/projected/9be5a607-1a91-43aa-953f-bb0950d5a9c2-kube-api-access-mmlgb\") pod \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.850460 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be5a607-1a91-43aa-953f-bb0950d5a9c2-config-data\") pod \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\" (UID: \"9be5a607-1a91-43aa-953f-bb0950d5a9c2\") " Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.854819 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9be5a607-1a91-43aa-953f-bb0950d5a9c2-kube-api-access-mmlgb" (OuterVolumeSpecName: "kube-api-access-mmlgb") pod "9be5a607-1a91-43aa-953f-bb0950d5a9c2" (UID: "9be5a607-1a91-43aa-953f-bb0950d5a9c2"). InnerVolumeSpecName "kube-api-access-mmlgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.880261 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9be5a607-1a91-43aa-953f-bb0950d5a9c2-config-data" (OuterVolumeSpecName: "config-data") pod "9be5a607-1a91-43aa-953f-bb0950d5a9c2" (UID: "9be5a607-1a91-43aa-953f-bb0950d5a9c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.952639 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmlgb\" (UniqueName: \"kubernetes.io/projected/9be5a607-1a91-43aa-953f-bb0950d5a9c2-kube-api-access-mmlgb\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:59 crc kubenswrapper[4734]: I1125 09:50:59.952692 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9be5a607-1a91-43aa-953f-bb0950d5a9c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.435206 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" event={"ID":"9be5a607-1a91-43aa-953f-bb0950d5a9c2","Type":"ContainerDied","Data":"53907829df5bf8e88fcbe69ab6f3e17ab41d460845e40376448ab0ac10de4356"} Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.435237 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-db-sync-fvjv8" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.435252 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53907829df5bf8e88fcbe69ab6f3e17ab41d460845e40376448ab0ac10de4356" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.606338 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-942pc"] Nov 25 09:51:00 crc kubenswrapper[4734]: E1125 09:51:00.606635 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9be5a607-1a91-43aa-953f-bb0950d5a9c2" containerName="keystone-db-sync" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.606653 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="9be5a607-1a91-43aa-953f-bb0950d5a9c2" containerName="keystone-db-sync" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.606781 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="9be5a607-1a91-43aa-953f-bb0950d5a9c2" containerName="keystone-db-sync" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.607292 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.609823 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.609869 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-4ws7h" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.609886 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"osp-secret" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.610343 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.610715 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.624484 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-942pc"] Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.764061 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbkh9\" (UniqueName: \"kubernetes.io/projected/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-kube-api-access-hbkh9\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.764127 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-scripts\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.764257 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-fernet-keys\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: 
I1125 09:51:00.764274 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-credential-keys\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.764324 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-config-data\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.866181 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-scripts\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.866242 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-fernet-keys\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.866260 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-credential-keys\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.866295 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-config-data\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.866346 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbkh9\" (UniqueName: \"kubernetes.io/projected/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-kube-api-access-hbkh9\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.871304 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-scripts\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.871605 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-fernet-keys\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.871778 4734 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-config-data\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.877064 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-credential-keys\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.881399 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbkh9\" (UniqueName: \"kubernetes.io/projected/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-kube-api-access-hbkh9\") pod \"keystone-bootstrap-942pc\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:00 crc kubenswrapper[4734]: I1125 09:51:00.931851 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:01 crc kubenswrapper[4734]: I1125 09:51:01.348743 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-942pc"] Nov 25 09:51:01 crc kubenswrapper[4734]: W1125 09:51:01.355665 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2b5a9bc_be61_416c_aa03_9a4a93b2b861.slice/crio-727561bfc8449ff03432022143f858c6451a715f5b03461b9c7145ca74f95d13 WatchSource:0}: Error finding container 727561bfc8449ff03432022143f858c6451a715f5b03461b9c7145ca74f95d13: Status 404 returned error can't find the container with id 727561bfc8449ff03432022143f858c6451a715f5b03461b9c7145ca74f95d13 Nov 25 09:51:01 crc kubenswrapper[4734]: I1125 09:51:01.442288 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" event={"ID":"e2b5a9bc-be61-416c-aa03-9a4a93b2b861","Type":"ContainerStarted","Data":"727561bfc8449ff03432022143f858c6451a715f5b03461b9c7145ca74f95d13"} Nov 25 09:51:02 crc kubenswrapper[4734]: I1125 09:51:02.451228 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" event={"ID":"e2b5a9bc-be61-416c-aa03-9a4a93b2b861","Type":"ContainerStarted","Data":"f3e2e7ae79c4fad99eab62265ca9b4e046eb50f8d2563212be819e484f8e2bc7"} Nov 25 09:51:02 crc kubenswrapper[4734]: I1125 09:51:02.469063 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" podStartSLOduration=2.469040842 podStartE2EDuration="2.469040842s" podCreationTimestamp="2025-11-25 09:51:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:51:02.468560408 +0000 UTC m=+1385.279022402" watchObservedRunningTime="2025-11-25 09:51:02.469040842 +0000 UTC m=+1385.279502836" Nov 25 09:51:05 crc kubenswrapper[4734]: I1125 09:51:05.774291 4734 generic.go:334] "Generic (PLEG): container finished" podID="e2b5a9bc-be61-416c-aa03-9a4a93b2b861" containerID="f3e2e7ae79c4fad99eab62265ca9b4e046eb50f8d2563212be819e484f8e2bc7" exitCode=0 Nov 25 09:51:05 crc kubenswrapper[4734]: I1125 09:51:05.774694 4734 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" event={"ID":"e2b5a9bc-be61-416c-aa03-9a4a93b2b861","Type":"ContainerDied","Data":"f3e2e7ae79c4fad99eab62265ca9b4e046eb50f8d2563212be819e484f8e2bc7"} Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.017078 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.189284 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-fernet-keys\") pod \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.189379 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-credential-keys\") pod \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.189492 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbkh9\" (UniqueName: \"kubernetes.io/projected/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-kube-api-access-hbkh9\") pod \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.190303 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-scripts\") pod \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.190470 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-config-data\") pod \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\" (UID: \"e2b5a9bc-be61-416c-aa03-9a4a93b2b861\") " Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.195336 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "e2b5a9bc-be61-416c-aa03-9a4a93b2b861" (UID: "e2b5a9bc-be61-416c-aa03-9a4a93b2b861"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.195390 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "e2b5a9bc-be61-416c-aa03-9a4a93b2b861" (UID: "e2b5a9bc-be61-416c-aa03-9a4a93b2b861"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.196915 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-kube-api-access-hbkh9" (OuterVolumeSpecName: "kube-api-access-hbkh9") pod "e2b5a9bc-be61-416c-aa03-9a4a93b2b861" (UID: "e2b5a9bc-be61-416c-aa03-9a4a93b2b861"). InnerVolumeSpecName "kube-api-access-hbkh9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.197626 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-scripts" (OuterVolumeSpecName: "scripts") pod "e2b5a9bc-be61-416c-aa03-9a4a93b2b861" (UID: "e2b5a9bc-be61-416c-aa03-9a4a93b2b861"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.218167 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-config-data" (OuterVolumeSpecName: "config-data") pod "e2b5a9bc-be61-416c-aa03-9a4a93b2b861" (UID: "e2b5a9bc-be61-416c-aa03-9a4a93b2b861"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.292116 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.292161 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.292180 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbkh9\" (UniqueName: \"kubernetes.io/projected/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-kube-api-access-hbkh9\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.292191 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.292203 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2b5a9bc-be61-416c-aa03-9a4a93b2b861-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.793829 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" event={"ID":"e2b5a9bc-be61-416c-aa03-9a4a93b2b861","Type":"ContainerDied","Data":"727561bfc8449ff03432022143f858c6451a715f5b03461b9c7145ca74f95d13"} Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.793873 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="727561bfc8449ff03432022143f858c6451a715f5b03461b9c7145ca74f95d13" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.793880 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-bootstrap-942pc" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.861787 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-55c98d597d-78j2k"] Nov 25 09:51:07 crc kubenswrapper[4734]: E1125 09:51:07.862119 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2b5a9bc-be61-416c-aa03-9a4a93b2b861" containerName="keystone-bootstrap" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.862152 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2b5a9bc-be61-416c-aa03-9a4a93b2b861" containerName="keystone-bootstrap" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.862378 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2b5a9bc-be61-416c-aa03-9a4a93b2b861" containerName="keystone-bootstrap" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.862848 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.864712 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.868202 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.868483 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-4ws7h" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.868728 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts" Nov 25 09:51:07 crc kubenswrapper[4734]: I1125 09:51:07.873029 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-55c98d597d-78j2k"] Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.000140 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45g9x\" (UniqueName: \"kubernetes.io/projected/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-kube-api-access-45g9x\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.000221 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.000241 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.000259 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " 
pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.000294 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.101398 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45g9x\" (UniqueName: \"kubernetes.io/projected/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-kube-api-access-45g9x\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.101464 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.101485 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.101501 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.101536 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.105281 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.105593 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.105728 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc 
kubenswrapper[4734]: I1125 09:51:08.115464 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.117316 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45g9x\" (UniqueName: \"kubernetes.io/projected/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-kube-api-access-45g9x\") pod \"keystone-55c98d597d-78j2k\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.177705 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.600175 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-55c98d597d-78j2k"] Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.801958 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" event={"ID":"36c08187-0f8e-495f-8cf9-dfbdf7c825d1","Type":"ContainerStarted","Data":"344a2f0495c327ad8325a37efa5c2d5bacc8b089d7af8b77b6669aa1b4649ecc"} Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.802004 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" event={"ID":"36c08187-0f8e-495f-8cf9-dfbdf7c825d1","Type":"ContainerStarted","Data":"173cb7a750a2455ce1cdf9ce09f149d2aaf64aaa6ef5a8a314c6c9a9f1145c8d"} Nov 25 09:51:08 crc kubenswrapper[4734]: I1125 09:51:08.802077 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.695813 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.696488 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.696560 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.697148 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"90ed44f721fb21f7c9a46b3cc3ce395345dcb1bd87653d89f7c1d2cf3cd435fb"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.697196 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" 
podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://90ed44f721fb21f7c9a46b3cc3ce395345dcb1bd87653d89f7c1d2cf3cd435fb" gracePeriod=600 Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.893298 4734 generic.go:334] "Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="90ed44f721fb21f7c9a46b3cc3ce395345dcb1bd87653d89f7c1d2cf3cd435fb" exitCode=0 Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.893338 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"90ed44f721fb21f7c9a46b3cc3ce395345dcb1bd87653d89f7c1d2cf3cd435fb"} Nov 25 09:51:20 crc kubenswrapper[4734]: I1125 09:51:20.893369 4734 scope.go:117] "RemoveContainer" containerID="b897de4eab07f171dfe6b1c309559bbe3bc70bbec2b52f3e83fb0315718594f5" Nov 25 09:51:21 crc kubenswrapper[4734]: I1125 09:51:21.902696 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db"} Nov 25 09:51:21 crc kubenswrapper[4734]: I1125 09:51:21.922554 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" podStartSLOduration=14.922536587 podStartE2EDuration="14.922536587s" podCreationTimestamp="2025-11-25 09:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:51:08.824128548 +0000 UTC m=+1391.634590552" watchObservedRunningTime="2025-11-25 09:51:21.922536587 +0000 UTC m=+1404.732998581" Nov 25 09:51:39 crc kubenswrapper[4734]: I1125 09:51:39.690681 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.872046 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/openstackclient"] Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.873920 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.876972 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"openstack-config-secret" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.877174 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openstack-config" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.877186 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"default-dockercfg-tqp2f" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.882498 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstackclient"] Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.975409 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config-secret\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.975493 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7cv2\" (UniqueName: \"kubernetes.io/projected/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-kube-api-access-x7cv2\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:40 crc kubenswrapper[4734]: I1125 09:51:40.975572 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.076956 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7cv2\" (UniqueName: \"kubernetes.io/projected/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-kube-api-access-x7cv2\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.077053 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.077121 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config-secret\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.078192 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 
09:51:41.085586 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config-secret\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.096121 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7cv2\" (UniqueName: \"kubernetes.io/projected/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-kube-api-access-x7cv2\") pod \"openstackclient\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.196532 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstackclient" Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.602833 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/openstackclient"] Nov 25 09:51:41 crc kubenswrapper[4734]: W1125 09:51:41.610646 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8cbad043_b8dd_44b6_81dd_0f9bdbb7d9bc.slice/crio-12b7f2ece0f5806b2467efa7ae288bb54a054c84457f9a54de02b70acbc060bc WatchSource:0}: Error finding container 12b7f2ece0f5806b2467efa7ae288bb54a054c84457f9a54de02b70acbc060bc: Status 404 returned error can't find the container with id 12b7f2ece0f5806b2467efa7ae288bb54a054c84457f9a54de02b70acbc060bc Nov 25 09:51:41 crc kubenswrapper[4734]: I1125 09:51:41.614107 4734 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:51:42 crc kubenswrapper[4734]: I1125 09:51:42.058176 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstackclient" event={"ID":"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc","Type":"ContainerStarted","Data":"12b7f2ece0f5806b2467efa7ae288bb54a054c84457f9a54de02b70acbc060bc"} Nov 25 09:51:49 crc kubenswrapper[4734]: I1125 09:51:49.108951 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstackclient" event={"ID":"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc","Type":"ContainerStarted","Data":"a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325"} Nov 25 09:51:49 crc kubenswrapper[4734]: I1125 09:51:49.128261 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/openstackclient" podStartSLOduration=2.234264785 podStartE2EDuration="9.128243422s" podCreationTimestamp="2025-11-25 09:51:40 +0000 UTC" firstStartedPulling="2025-11-25 09:51:41.613790158 +0000 UTC m=+1424.424252162" lastFinishedPulling="2025-11-25 09:51:48.507768805 +0000 UTC m=+1431.318230799" observedRunningTime="2025-11-25 09:51:49.124667289 +0000 UTC m=+1431.935129313" watchObservedRunningTime="2025-11-25 09:51:49.128243422 +0000 UTC m=+1431.938705416" Nov 25 09:52:00 crc kubenswrapper[4734]: I1125 09:52:00.875592 4734 scope.go:117] "RemoveContainer" containerID="54a8e85b7b1989eb0e3dab80ef757564e03cb8990e886cd86658220c888eb628" Nov 25 09:52:00 crc kubenswrapper[4734]: I1125 09:52:00.902783 4734 scope.go:117] "RemoveContainer" containerID="f8da931fe99629a6788f6c1487b603e2759679a850fe64d151a0a47b8b5913d6" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.177285 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q9xwk"] Nov 25 
09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.179252 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.187971 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q9xwk"] Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.342517 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-utilities\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.342992 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ldcc\" (UniqueName: \"kubernetes.io/projected/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-kube-api-access-5ldcc\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.343044 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-catalog-content\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.444754 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-utilities\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.444816 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ldcc\" (UniqueName: \"kubernetes.io/projected/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-kube-api-access-5ldcc\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.445460 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-utilities\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.444890 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-catalog-content\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.445510 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-catalog-content\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " 
pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.463401 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ldcc\" (UniqueName: \"kubernetes.io/projected/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-kube-api-access-5ldcc\") pod \"community-operators-q9xwk\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.503310 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:37 crc kubenswrapper[4734]: I1125 09:52:37.829581 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q9xwk"] Nov 25 09:52:38 crc kubenswrapper[4734]: I1125 09:52:38.485629 4734 generic.go:334] "Generic (PLEG): container finished" podID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerID="cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57" exitCode=0 Nov 25 09:52:38 crc kubenswrapper[4734]: I1125 09:52:38.485690 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerDied","Data":"cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57"} Nov 25 09:52:38 crc kubenswrapper[4734]: I1125 09:52:38.486447 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerStarted","Data":"ca050cf6d80cc73f4258b550e935da91fc73a820303e5df6be6c2764a7f10704"} Nov 25 09:52:39 crc kubenswrapper[4734]: I1125 09:52:39.495834 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerStarted","Data":"3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8"} Nov 25 09:52:40 crc kubenswrapper[4734]: I1125 09:52:40.507449 4734 generic.go:334] "Generic (PLEG): container finished" podID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerID="3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8" exitCode=0 Nov 25 09:52:40 crc kubenswrapper[4734]: I1125 09:52:40.507523 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerDied","Data":"3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8"} Nov 25 09:52:41 crc kubenswrapper[4734]: I1125 09:52:41.517396 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerStarted","Data":"9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4"} Nov 25 09:52:41 crc kubenswrapper[4734]: I1125 09:52:41.538381 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q9xwk" podStartSLOduration=2.14480082 podStartE2EDuration="4.538354374s" podCreationTimestamp="2025-11-25 09:52:37 +0000 UTC" firstStartedPulling="2025-11-25 09:52:38.488982832 +0000 UTC m=+1481.299444866" lastFinishedPulling="2025-11-25 09:52:40.882536426 +0000 UTC m=+1483.692998420" observedRunningTime="2025-11-25 09:52:41.536438279 +0000 UTC m=+1484.346900293" watchObservedRunningTime="2025-11-25 09:52:41.538354374 
+0000 UTC m=+1484.348816368" Nov 25 09:52:47 crc kubenswrapper[4734]: I1125 09:52:47.504481 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:47 crc kubenswrapper[4734]: I1125 09:52:47.505466 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:47 crc kubenswrapper[4734]: I1125 09:52:47.543645 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:47 crc kubenswrapper[4734]: I1125 09:52:47.602544 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:47 crc kubenswrapper[4734]: I1125 09:52:47.774455 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q9xwk"] Nov 25 09:52:49 crc kubenswrapper[4734]: I1125 09:52:49.568077 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q9xwk" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="registry-server" containerID="cri-o://9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4" gracePeriod=2 Nov 25 09:52:49 crc kubenswrapper[4734]: I1125 09:52:49.931380 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.117659 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ldcc\" (UniqueName: \"kubernetes.io/projected/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-kube-api-access-5ldcc\") pod \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.117726 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-utilities\") pod \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.117844 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-catalog-content\") pod \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\" (UID: \"fea55fd9-95ec-46e9-a4e2-baf7190be6ff\") " Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.118763 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-utilities" (OuterVolumeSpecName: "utilities") pod "fea55fd9-95ec-46e9-a4e2-baf7190be6ff" (UID: "fea55fd9-95ec-46e9-a4e2-baf7190be6ff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.123789 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-kube-api-access-5ldcc" (OuterVolumeSpecName: "kube-api-access-5ldcc") pod "fea55fd9-95ec-46e9-a4e2-baf7190be6ff" (UID: "fea55fd9-95ec-46e9-a4e2-baf7190be6ff"). InnerVolumeSpecName "kube-api-access-5ldcc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.219138 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ldcc\" (UniqueName: \"kubernetes.io/projected/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-kube-api-access-5ldcc\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.219174 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.577460 4734 generic.go:334] "Generic (PLEG): container finished" podID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerID="9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4" exitCode=0 Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.577518 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerDied","Data":"9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4"} Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.577538 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q9xwk" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.577556 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q9xwk" event={"ID":"fea55fd9-95ec-46e9-a4e2-baf7190be6ff","Type":"ContainerDied","Data":"ca050cf6d80cc73f4258b550e935da91fc73a820303e5df6be6c2764a7f10704"} Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.577609 4734 scope.go:117] "RemoveContainer" containerID="9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.600753 4734 scope.go:117] "RemoveContainer" containerID="3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.620739 4734 scope.go:117] "RemoveContainer" containerID="cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.653308 4734 scope.go:117] "RemoveContainer" containerID="9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4" Nov 25 09:52:50 crc kubenswrapper[4734]: E1125 09:52:50.653763 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4\": container with ID starting with 9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4 not found: ID does not exist" containerID="9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.653832 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4"} err="failed to get container status \"9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4\": rpc error: code = NotFound desc = could not find container \"9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4\": container with ID starting with 9c4235d2f171612a491f53aa1d6157d4fff58acd231aa0c9a97816f920e791e4 not found: ID does not exist" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.653874 4734 scope.go:117] 
"RemoveContainer" containerID="3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8" Nov 25 09:52:50 crc kubenswrapper[4734]: E1125 09:52:50.654275 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8\": container with ID starting with 3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8 not found: ID does not exist" containerID="3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.654329 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8"} err="failed to get container status \"3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8\": rpc error: code = NotFound desc = could not find container \"3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8\": container with ID starting with 3de3dba391ec362e4556dfc0b8b61d218df58f92fea9d73409f38806b366bee8 not found: ID does not exist" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.654352 4734 scope.go:117] "RemoveContainer" containerID="cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57" Nov 25 09:52:50 crc kubenswrapper[4734]: E1125 09:52:50.654687 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57\": container with ID starting with cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57 not found: ID does not exist" containerID="cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.654732 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57"} err="failed to get container status \"cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57\": rpc error: code = NotFound desc = could not find container \"cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57\": container with ID starting with cfb2b669d0da7339fab69ac2aee0d2a7339bd4347c7e823c1042fef008cd8b57 not found: ID does not exist" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.761881 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fea55fd9-95ec-46e9-a4e2-baf7190be6ff" (UID: "fea55fd9-95ec-46e9-a4e2-baf7190be6ff"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.827543 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fea55fd9-95ec-46e9-a4e2-baf7190be6ff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.910626 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q9xwk"] Nov 25 09:52:50 crc kubenswrapper[4734]: I1125 09:52:50.914388 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q9xwk"] Nov 25 09:52:52 crc kubenswrapper[4734]: I1125 09:52:52.254705 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" path="/var/lib/kubelet/pods/fea55fd9-95ec-46e9-a4e2-baf7190be6ff/volumes" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.821042 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pdqhp"] Nov 25 09:52:58 crc kubenswrapper[4734]: E1125 09:52:58.821942 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="registry-server" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.821956 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="registry-server" Nov 25 09:52:58 crc kubenswrapper[4734]: E1125 09:52:58.821971 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="extract-utilities" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.821980 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="extract-utilities" Nov 25 09:52:58 crc kubenswrapper[4734]: E1125 09:52:58.821995 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="extract-content" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.822004 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="extract-content" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.822160 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="fea55fd9-95ec-46e9-a4e2-baf7190be6ff" containerName="registry-server" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.823631 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.830971 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pdqhp"] Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.852439 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-catalog-content\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.852571 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-utilities\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.852610 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbxcn\" (UniqueName: \"kubernetes.io/projected/2039b923-9680-4d4c-944d-3293e29be75e-kube-api-access-hbxcn\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.953890 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-catalog-content\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.953974 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-utilities\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.953995 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbxcn\" (UniqueName: \"kubernetes.io/projected/2039b923-9680-4d4c-944d-3293e29be75e-kube-api-access-hbxcn\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.954451 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-catalog-content\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.954463 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-utilities\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:58 crc kubenswrapper[4734]: I1125 09:52:58.975537 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hbxcn\" (UniqueName: \"kubernetes.io/projected/2039b923-9680-4d4c-944d-3293e29be75e-kube-api-access-hbxcn\") pod \"redhat-operators-pdqhp\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:59 crc kubenswrapper[4734]: I1125 09:52:59.150551 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:52:59 crc kubenswrapper[4734]: I1125 09:52:59.375008 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pdqhp"] Nov 25 09:52:59 crc kubenswrapper[4734]: I1125 09:52:59.648401 4734 generic.go:334] "Generic (PLEG): container finished" podID="2039b923-9680-4d4c-944d-3293e29be75e" containerID="c8245b5d0750db5c378d62e5e972c6a8b619fd2e5a38e9a1b38c45a976d038c2" exitCode=0 Nov 25 09:52:59 crc kubenswrapper[4734]: I1125 09:52:59.648593 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pdqhp" event={"ID":"2039b923-9680-4d4c-944d-3293e29be75e","Type":"ContainerDied","Data":"c8245b5d0750db5c378d62e5e972c6a8b619fd2e5a38e9a1b38c45a976d038c2"} Nov 25 09:52:59 crc kubenswrapper[4734]: I1125 09:52:59.648759 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pdqhp" event={"ID":"2039b923-9680-4d4c-944d-3293e29be75e","Type":"ContainerStarted","Data":"b8cad115b343a970e1b982bb6fe1a190a1f264f26fae40d6356337fc15fe81c2"} Nov 25 09:53:00 crc kubenswrapper[4734]: I1125 09:53:00.965203 4734 scope.go:117] "RemoveContainer" containerID="0263c217aa4af48ba1112906424c64eef94a608411c74505798f73a5e8676727" Nov 25 09:53:00 crc kubenswrapper[4734]: I1125 09:53:00.996905 4734 scope.go:117] "RemoveContainer" containerID="d6868ecb85c915efcdd01fb9921bc21552a1d97274931871e14ddf6afa7ffd63" Nov 25 09:53:01 crc kubenswrapper[4734]: I1125 09:53:01.667224 4734 generic.go:334] "Generic (PLEG): container finished" podID="2039b923-9680-4d4c-944d-3293e29be75e" containerID="ba20ec7a6e962896970be24077ce4d5c077c4579adf7907019bc4f200d7203f3" exitCode=0 Nov 25 09:53:01 crc kubenswrapper[4734]: I1125 09:53:01.667304 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pdqhp" event={"ID":"2039b923-9680-4d4c-944d-3293e29be75e","Type":"ContainerDied","Data":"ba20ec7a6e962896970be24077ce4d5c077c4579adf7907019bc4f200d7203f3"} Nov 25 09:53:02 crc kubenswrapper[4734]: I1125 09:53:02.675907 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pdqhp" event={"ID":"2039b923-9680-4d4c-944d-3293e29be75e","Type":"ContainerStarted","Data":"1a0c2a7d493de6dea806515b91d870c7b1d0686129b3805b6d465f47f4d8ff32"} Nov 25 09:53:09 crc kubenswrapper[4734]: I1125 09:53:09.151805 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:53:09 crc kubenswrapper[4734]: I1125 09:53:09.152701 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:53:09 crc kubenswrapper[4734]: I1125 09:53:09.217720 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:53:09 crc kubenswrapper[4734]: I1125 09:53:09.240967 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pdqhp" podStartSLOduration=8.80275089 
podStartE2EDuration="11.240916288s" podCreationTimestamp="2025-11-25 09:52:58 +0000 UTC" firstStartedPulling="2025-11-25 09:52:59.649886822 +0000 UTC m=+1502.460348816" lastFinishedPulling="2025-11-25 09:53:02.08805221 +0000 UTC m=+1504.898514214" observedRunningTime="2025-11-25 09:53:02.69261659 +0000 UTC m=+1505.503078584" watchObservedRunningTime="2025-11-25 09:53:09.240916288 +0000 UTC m=+1512.051378302" Nov 25 09:53:09 crc kubenswrapper[4734]: I1125 09:53:09.777812 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:53:09 crc kubenswrapper[4734]: I1125 09:53:09.831767 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pdqhp"] Nov 25 09:53:11 crc kubenswrapper[4734]: I1125 09:53:11.748990 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pdqhp" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="registry-server" containerID="cri-o://1a0c2a7d493de6dea806515b91d870c7b1d0686129b3805b6d465f47f4d8ff32" gracePeriod=2 Nov 25 09:53:13 crc kubenswrapper[4734]: I1125 09:53:13.801702 4734 generic.go:334] "Generic (PLEG): container finished" podID="2039b923-9680-4d4c-944d-3293e29be75e" containerID="1a0c2a7d493de6dea806515b91d870c7b1d0686129b3805b6d465f47f4d8ff32" exitCode=0 Nov 25 09:53:13 crc kubenswrapper[4734]: I1125 09:53:13.801798 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pdqhp" event={"ID":"2039b923-9680-4d4c-944d-3293e29be75e","Type":"ContainerDied","Data":"1a0c2a7d493de6dea806515b91d870c7b1d0686129b3805b6d465f47f4d8ff32"} Nov 25 09:53:13 crc kubenswrapper[4734]: I1125 09:53:13.953884 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.099073 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-utilities\") pod \"2039b923-9680-4d4c-944d-3293e29be75e\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.099165 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbxcn\" (UniqueName: \"kubernetes.io/projected/2039b923-9680-4d4c-944d-3293e29be75e-kube-api-access-hbxcn\") pod \"2039b923-9680-4d4c-944d-3293e29be75e\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.099292 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-catalog-content\") pod \"2039b923-9680-4d4c-944d-3293e29be75e\" (UID: \"2039b923-9680-4d4c-944d-3293e29be75e\") " Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.099960 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-utilities" (OuterVolumeSpecName: "utilities") pod "2039b923-9680-4d4c-944d-3293e29be75e" (UID: "2039b923-9680-4d4c-944d-3293e29be75e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.104502 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2039b923-9680-4d4c-944d-3293e29be75e-kube-api-access-hbxcn" (OuterVolumeSpecName: "kube-api-access-hbxcn") pod "2039b923-9680-4d4c-944d-3293e29be75e" (UID: "2039b923-9680-4d4c-944d-3293e29be75e"). InnerVolumeSpecName "kube-api-access-hbxcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.181187 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2039b923-9680-4d4c-944d-3293e29be75e" (UID: "2039b923-9680-4d4c-944d-3293e29be75e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.201397 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.201437 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbxcn\" (UniqueName: \"kubernetes.io/projected/2039b923-9680-4d4c-944d-3293e29be75e-kube-api-access-hbxcn\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.201448 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2039b923-9680-4d4c-944d-3293e29be75e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.810335 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pdqhp" event={"ID":"2039b923-9680-4d4c-944d-3293e29be75e","Type":"ContainerDied","Data":"b8cad115b343a970e1b982bb6fe1a190a1f264f26fae40d6356337fc15fe81c2"} Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.810407 4734 scope.go:117] "RemoveContainer" containerID="1a0c2a7d493de6dea806515b91d870c7b1d0686129b3805b6d465f47f4d8ff32" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.810443 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pdqhp" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.831609 4734 scope.go:117] "RemoveContainer" containerID="ba20ec7a6e962896970be24077ce4d5c077c4579adf7907019bc4f200d7203f3" Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.839171 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pdqhp"] Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.843066 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pdqhp"] Nov 25 09:53:14 crc kubenswrapper[4734]: I1125 09:53:14.856995 4734 scope.go:117] "RemoveContainer" containerID="c8245b5d0750db5c378d62e5e972c6a8b619fd2e5a38e9a1b38c45a976d038c2" Nov 25 09:53:16 crc kubenswrapper[4734]: I1125 09:53:16.257393 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2039b923-9680-4d4c-944d-3293e29be75e" path="/var/lib/kubelet/pods/2039b923-9680-4d4c-944d-3293e29be75e/volumes" Nov 25 09:53:20 crc kubenswrapper[4734]: I1125 09:53:20.695599 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:53:20 crc kubenswrapper[4734]: I1125 09:53:20.695956 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.479700 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-974q5"] Nov 25 09:53:29 crc kubenswrapper[4734]: E1125 09:53:29.480343 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="registry-server" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.480358 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="registry-server" Nov 25 09:53:29 crc kubenswrapper[4734]: E1125 09:53:29.480367 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="extract-content" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.480372 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="extract-content" Nov 25 09:53:29 crc kubenswrapper[4734]: E1125 09:53:29.480392 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="extract-utilities" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.480398 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="extract-utilities" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.480511 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="2039b923-9680-4d4c-944d-3293e29be75e" containerName="registry-server" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.481338 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.494968 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-974q5"] Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.532730 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-catalog-content\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.532805 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7qj6\" (UniqueName: \"kubernetes.io/projected/1263d134-da0e-42d3-88b4-6b70edae22e0-kube-api-access-c7qj6\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.532836 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-utilities\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.633941 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-catalog-content\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.634013 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-utilities\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.634043 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7qj6\" (UniqueName: \"kubernetes.io/projected/1263d134-da0e-42d3-88b4-6b70edae22e0-kube-api-access-c7qj6\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.634813 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-catalog-content\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.634864 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-utilities\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.663303 4734 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-c7qj6\" (UniqueName: \"kubernetes.io/projected/1263d134-da0e-42d3-88b4-6b70edae22e0-kube-api-access-c7qj6\") pod \"redhat-marketplace-974q5\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:29 crc kubenswrapper[4734]: I1125 09:53:29.802330 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:30 crc kubenswrapper[4734]: I1125 09:53:30.047795 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-974q5"] Nov 25 09:53:30 crc kubenswrapper[4734]: I1125 09:53:30.944855 4734 generic.go:334] "Generic (PLEG): container finished" podID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerID="feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186" exitCode=0 Nov 25 09:53:30 crc kubenswrapper[4734]: I1125 09:53:30.944978 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974q5" event={"ID":"1263d134-da0e-42d3-88b4-6b70edae22e0","Type":"ContainerDied","Data":"feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186"} Nov 25 09:53:30 crc kubenswrapper[4734]: I1125 09:53:30.946686 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974q5" event={"ID":"1263d134-da0e-42d3-88b4-6b70edae22e0","Type":"ContainerStarted","Data":"a40611c39472be9b89e2ce5fcc9c60737fe0db083ac75c8f26a0803f0334b054"} Nov 25 09:53:31 crc kubenswrapper[4734]: I1125 09:53:31.958549 4734 generic.go:334] "Generic (PLEG): container finished" podID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerID="6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41" exitCode=0 Nov 25 09:53:31 crc kubenswrapper[4734]: I1125 09:53:31.958638 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974q5" event={"ID":"1263d134-da0e-42d3-88b4-6b70edae22e0","Type":"ContainerDied","Data":"6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41"} Nov 25 09:53:32 crc kubenswrapper[4734]: I1125 09:53:32.972812 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974q5" event={"ID":"1263d134-da0e-42d3-88b4-6b70edae22e0","Type":"ContainerStarted","Data":"c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c"} Nov 25 09:53:33 crc kubenswrapper[4734]: I1125 09:53:33.001359 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-974q5" podStartSLOduration=2.468486395 podStartE2EDuration="4.001331828s" podCreationTimestamp="2025-11-25 09:53:29 +0000 UTC" firstStartedPulling="2025-11-25 09:53:30.947984449 +0000 UTC m=+1533.758446453" lastFinishedPulling="2025-11-25 09:53:32.480829892 +0000 UTC m=+1535.291291886" observedRunningTime="2025-11-25 09:53:32.9986436 +0000 UTC m=+1535.809105644" watchObservedRunningTime="2025-11-25 09:53:33.001331828 +0000 UTC m=+1535.811793882" Nov 25 09:53:39 crc kubenswrapper[4734]: I1125 09:53:39.803330 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:39 crc kubenswrapper[4734]: I1125 09:53:39.804276 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:39 crc kubenswrapper[4734]: I1125 09:53:39.855840 4734 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:40 crc kubenswrapper[4734]: I1125 09:53:40.074450 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:40 crc kubenswrapper[4734]: I1125 09:53:40.119434 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-974q5"] Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.040126 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-974q5" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="registry-server" containerID="cri-o://c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c" gracePeriod=2 Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.473528 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.637422 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7qj6\" (UniqueName: \"kubernetes.io/projected/1263d134-da0e-42d3-88b4-6b70edae22e0-kube-api-access-c7qj6\") pod \"1263d134-da0e-42d3-88b4-6b70edae22e0\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.637498 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-utilities\") pod \"1263d134-da0e-42d3-88b4-6b70edae22e0\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.637572 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-catalog-content\") pod \"1263d134-da0e-42d3-88b4-6b70edae22e0\" (UID: \"1263d134-da0e-42d3-88b4-6b70edae22e0\") " Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.638994 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-utilities" (OuterVolumeSpecName: "utilities") pod "1263d134-da0e-42d3-88b4-6b70edae22e0" (UID: "1263d134-da0e-42d3-88b4-6b70edae22e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.647802 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1263d134-da0e-42d3-88b4-6b70edae22e0-kube-api-access-c7qj6" (OuterVolumeSpecName: "kube-api-access-c7qj6") pod "1263d134-da0e-42d3-88b4-6b70edae22e0" (UID: "1263d134-da0e-42d3-88b4-6b70edae22e0"). InnerVolumeSpecName "kube-api-access-c7qj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.670071 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1263d134-da0e-42d3-88b4-6b70edae22e0" (UID: "1263d134-da0e-42d3-88b4-6b70edae22e0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.738352 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7qj6\" (UniqueName: \"kubernetes.io/projected/1263d134-da0e-42d3-88b4-6b70edae22e0-kube-api-access-c7qj6\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.738396 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:42 crc kubenswrapper[4734]: I1125 09:53:42.738410 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1263d134-da0e-42d3-88b4-6b70edae22e0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.051079 4734 generic.go:334] "Generic (PLEG): container finished" podID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerID="c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c" exitCode=0 Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.051199 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974q5" event={"ID":"1263d134-da0e-42d3-88b4-6b70edae22e0","Type":"ContainerDied","Data":"c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c"} Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.051226 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-974q5" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.051277 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-974q5" event={"ID":"1263d134-da0e-42d3-88b4-6b70edae22e0","Type":"ContainerDied","Data":"a40611c39472be9b89e2ce5fcc9c60737fe0db083ac75c8f26a0803f0334b054"} Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.051328 4734 scope.go:117] "RemoveContainer" containerID="c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.080186 4734 scope.go:117] "RemoveContainer" containerID="6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.109005 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-974q5"] Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.111074 4734 scope.go:117] "RemoveContainer" containerID="feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.120847 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-974q5"] Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.141338 4734 scope.go:117] "RemoveContainer" containerID="c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c" Nov 25 09:53:43 crc kubenswrapper[4734]: E1125 09:53:43.142565 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c\": container with ID starting with c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c not found: ID does not exist" containerID="c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.142621 4734 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c"} err="failed to get container status \"c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c\": rpc error: code = NotFound desc = could not find container \"c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c\": container with ID starting with c64137d010f4c140471a3684690b3d5724b7df111ed2464f485128110cb3449c not found: ID does not exist" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.142660 4734 scope.go:117] "RemoveContainer" containerID="6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41" Nov 25 09:53:43 crc kubenswrapper[4734]: E1125 09:53:43.143344 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41\": container with ID starting with 6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41 not found: ID does not exist" containerID="6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.143380 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41"} err="failed to get container status \"6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41\": rpc error: code = NotFound desc = could not find container \"6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41\": container with ID starting with 6a6dd7e490f1deb77e1c89f86b64bdda2864d86cbf24ad8864247e3e16a22c41 not found: ID does not exist" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.143403 4734 scope.go:117] "RemoveContainer" containerID="feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186" Nov 25 09:53:43 crc kubenswrapper[4734]: E1125 09:53:43.143807 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186\": container with ID starting with feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186 not found: ID does not exist" containerID="feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186" Nov 25 09:53:43 crc kubenswrapper[4734]: I1125 09:53:43.143832 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186"} err="failed to get container status \"feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186\": rpc error: code = NotFound desc = could not find container \"feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186\": container with ID starting with feec40fc69a0259acdd796bd5dd50b7e53dba1f1f636c88ab08eeaf0ef250186 not found: ID does not exist" Nov 25 09:53:44 crc kubenswrapper[4734]: I1125 09:53:44.255918 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" path="/var/lib/kubelet/pods/1263d134-da0e-42d3-88b4-6b70edae22e0/volumes" Nov 25 09:53:50 crc kubenswrapper[4734]: I1125 09:53:50.696792 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:53:50 crc kubenswrapper[4734]: I1125 09:53:50.697436 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:54:01 crc kubenswrapper[4734]: I1125 09:54:01.079605 4734 scope.go:117] "RemoveContainer" containerID="9e8e2ad70f582808b2b274f5f4fd1381480ec8a2fef9215dd160f163dd827350" Nov 25 09:54:01 crc kubenswrapper[4734]: I1125 09:54:01.112023 4734 scope.go:117] "RemoveContainer" containerID="fff87419dfee11c5a00b2c0638ea0726276ba41cded2019a5a72362b55c89169" Nov 25 09:54:01 crc kubenswrapper[4734]: I1125 09:54:01.144599 4734 scope.go:117] "RemoveContainer" containerID="8b9467783608c58691954bfe0960b0875d3d66282949bfeb3e7f6d7a96a5562e" Nov 25 09:54:01 crc kubenswrapper[4734]: I1125 09:54:01.198469 4734 scope.go:117] "RemoveContainer" containerID="7c736635f48a846074173273ba88d79a572f8757627880aa0467ae73996891f5" Nov 25 09:54:01 crc kubenswrapper[4734]: I1125 09:54:01.215913 4734 scope.go:117] "RemoveContainer" containerID="3891cdaa4db16c256383ab0bfc872b898a7ee09834bafdb7742863b0dfb4f716" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.922283 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hmx94"] Nov 25 09:54:05 crc kubenswrapper[4734]: E1125 09:54:05.922942 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="registry-server" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.922958 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="registry-server" Nov 25 09:54:05 crc kubenswrapper[4734]: E1125 09:54:05.922973 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="extract-content" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.922982 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="extract-content" Nov 25 09:54:05 crc kubenswrapper[4734]: E1125 09:54:05.923001 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="extract-utilities" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.923011 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="extract-utilities" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.923205 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="1263d134-da0e-42d3-88b4-6b70edae22e0" containerName="registry-server" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.924351 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:05 crc kubenswrapper[4734]: I1125 09:54:05.943928 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hmx94"] Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.073886 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdsc4\" (UniqueName: \"kubernetes.io/projected/3a24d416-bded-48b9-9a89-67ec3c3425ef-kube-api-access-jdsc4\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.073981 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-utilities\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.074196 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-catalog-content\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.175917 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdsc4\" (UniqueName: \"kubernetes.io/projected/3a24d416-bded-48b9-9a89-67ec3c3425ef-kube-api-access-jdsc4\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.176034 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-utilities\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.176108 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-catalog-content\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.176762 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-utilities\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.176784 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-catalog-content\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.199726 4734 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jdsc4\" (UniqueName: \"kubernetes.io/projected/3a24d416-bded-48b9-9a89-67ec3c3425ef-kube-api-access-jdsc4\") pod \"certified-operators-hmx94\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.247338 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:06 crc kubenswrapper[4734]: I1125 09:54:06.483573 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hmx94"] Nov 25 09:54:07 crc kubenswrapper[4734]: I1125 09:54:07.248028 4734 generic.go:334] "Generic (PLEG): container finished" podID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerID="ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6" exitCode=0 Nov 25 09:54:07 crc kubenswrapper[4734]: I1125 09:54:07.248112 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerDied","Data":"ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6"} Nov 25 09:54:07 crc kubenswrapper[4734]: I1125 09:54:07.248451 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerStarted","Data":"c4a1a0ff83484fcf0c8fed8dc50244e54edb8e77bd4ccba76c50e8b74fe80ee6"} Nov 25 09:54:08 crc kubenswrapper[4734]: I1125 09:54:08.270728 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerStarted","Data":"13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e"} Nov 25 09:54:09 crc kubenswrapper[4734]: I1125 09:54:09.279425 4734 generic.go:334] "Generic (PLEG): container finished" podID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerID="13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e" exitCode=0 Nov 25 09:54:09 crc kubenswrapper[4734]: I1125 09:54:09.279481 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerDied","Data":"13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e"} Nov 25 09:54:10 crc kubenswrapper[4734]: I1125 09:54:10.290050 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerStarted","Data":"1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e"} Nov 25 09:54:10 crc kubenswrapper[4734]: I1125 09:54:10.310853 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hmx94" podStartSLOduration=2.829095063 podStartE2EDuration="5.310835657s" podCreationTimestamp="2025-11-25 09:54:05 +0000 UTC" firstStartedPulling="2025-11-25 09:54:07.24943472 +0000 UTC m=+1570.059896724" lastFinishedPulling="2025-11-25 09:54:09.731175334 +0000 UTC m=+1572.541637318" observedRunningTime="2025-11-25 09:54:10.308377006 +0000 UTC m=+1573.118839020" watchObservedRunningTime="2025-11-25 09:54:10.310835657 +0000 UTC m=+1573.121297651" Nov 25 09:54:16 crc kubenswrapper[4734]: I1125 09:54:16.259339 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:16 crc kubenswrapper[4734]: I1125 09:54:16.260019 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:16 crc kubenswrapper[4734]: I1125 09:54:16.336029 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:16 crc kubenswrapper[4734]: I1125 09:54:16.392263 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:16 crc kubenswrapper[4734]: I1125 09:54:16.591209 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hmx94"] Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.350469 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hmx94" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="registry-server" containerID="cri-o://1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e" gracePeriod=2 Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.724206 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.886399 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdsc4\" (UniqueName: \"kubernetes.io/projected/3a24d416-bded-48b9-9a89-67ec3c3425ef-kube-api-access-jdsc4\") pod \"3a24d416-bded-48b9-9a89-67ec3c3425ef\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.886556 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-utilities\") pod \"3a24d416-bded-48b9-9a89-67ec3c3425ef\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.886604 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-catalog-content\") pod \"3a24d416-bded-48b9-9a89-67ec3c3425ef\" (UID: \"3a24d416-bded-48b9-9a89-67ec3c3425ef\") " Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.887347 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-utilities" (OuterVolumeSpecName: "utilities") pod "3a24d416-bded-48b9-9a89-67ec3c3425ef" (UID: "3a24d416-bded-48b9-9a89-67ec3c3425ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.892061 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a24d416-bded-48b9-9a89-67ec3c3425ef-kube-api-access-jdsc4" (OuterVolumeSpecName: "kube-api-access-jdsc4") pod "3a24d416-bded-48b9-9a89-67ec3c3425ef" (UID: "3a24d416-bded-48b9-9a89-67ec3c3425ef"). InnerVolumeSpecName "kube-api-access-jdsc4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.934515 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a24d416-bded-48b9-9a89-67ec3c3425ef" (UID: "3a24d416-bded-48b9-9a89-67ec3c3425ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.988214 4734 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.988256 4734 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a24d416-bded-48b9-9a89-67ec3c3425ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:18 crc kubenswrapper[4734]: I1125 09:54:18.988271 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdsc4\" (UniqueName: \"kubernetes.io/projected/3a24d416-bded-48b9-9a89-67ec3c3425ef-kube-api-access-jdsc4\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.359660 4734 generic.go:334] "Generic (PLEG): container finished" podID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerID="1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e" exitCode=0 Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.359863 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerDied","Data":"1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e"} Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.360046 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmx94" event={"ID":"3a24d416-bded-48b9-9a89-67ec3c3425ef","Type":"ContainerDied","Data":"c4a1a0ff83484fcf0c8fed8dc50244e54edb8e77bd4ccba76c50e8b74fe80ee6"} Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.360067 4734 scope.go:117] "RemoveContainer" containerID="1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.359959 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hmx94" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.392594 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hmx94"] Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.395625 4734 scope.go:117] "RemoveContainer" containerID="13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.396844 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hmx94"] Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.411283 4734 scope.go:117] "RemoveContainer" containerID="ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.435943 4734 scope.go:117] "RemoveContainer" containerID="1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e" Nov 25 09:54:19 crc kubenswrapper[4734]: E1125 09:54:19.436552 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e\": container with ID starting with 1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e not found: ID does not exist" containerID="1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.436600 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e"} err="failed to get container status \"1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e\": rpc error: code = NotFound desc = could not find container \"1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e\": container with ID starting with 1c6c2cb5a816844a7191742fca1d8ff05ae0db313da3beb951a8d988491f1e5e not found: ID does not exist" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.436639 4734 scope.go:117] "RemoveContainer" containerID="13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e" Nov 25 09:54:19 crc kubenswrapper[4734]: E1125 09:54:19.437329 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e\": container with ID starting with 13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e not found: ID does not exist" containerID="13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.437349 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e"} err="failed to get container status \"13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e\": rpc error: code = NotFound desc = could not find container \"13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e\": container with ID starting with 13e96de21ac687b38db2942790bc36bac2e351b69d07b8ce43178801de0f553e not found: ID does not exist" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.437361 4734 scope.go:117] "RemoveContainer" containerID="ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6" Nov 25 09:54:19 crc kubenswrapper[4734]: E1125 09:54:19.437767 4734 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6\": container with ID starting with ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6 not found: ID does not exist" containerID="ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6" Nov 25 09:54:19 crc kubenswrapper[4734]: I1125 09:54:19.437804 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6"} err="failed to get container status \"ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6\": rpc error: code = NotFound desc = could not find container \"ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6\": container with ID starting with ad994f35bb6c64078f09501ce4cf4528f6736ed1690baea1b288b1261036d9a6 not found: ID does not exist" Nov 25 09:54:20 crc kubenswrapper[4734]: I1125 09:54:20.256063 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" path="/var/lib/kubelet/pods/3a24d416-bded-48b9-9a89-67ec3c3425ef/volumes" Nov 25 09:54:20 crc kubenswrapper[4734]: I1125 09:54:20.696350 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:54:20 crc kubenswrapper[4734]: I1125 09:54:20.696422 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:54:20 crc kubenswrapper[4734]: I1125 09:54:20.696477 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 09:54:20 crc kubenswrapper[4734]: I1125 09:54:20.697197 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:54:20 crc kubenswrapper[4734]: I1125 09:54:20.697262 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" gracePeriod=600 Nov 25 09:54:20 crc kubenswrapper[4734]: E1125 09:54:20.838800 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:54:21 crc kubenswrapper[4734]: I1125 09:54:21.379233 4734 generic.go:334] 
"Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" exitCode=0 Nov 25 09:54:21 crc kubenswrapper[4734]: I1125 09:54:21.379286 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db"} Nov 25 09:54:21 crc kubenswrapper[4734]: I1125 09:54:21.379339 4734 scope.go:117] "RemoveContainer" containerID="90ed44f721fb21f7c9a46b3cc3ce395345dcb1bd87653d89f7c1d2cf3cd435fb" Nov 25 09:54:21 crc kubenswrapper[4734]: I1125 09:54:21.379818 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:54:21 crc kubenswrapper[4734]: E1125 09:54:21.380121 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:54:33 crc kubenswrapper[4734]: I1125 09:54:33.247729 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:54:33 crc kubenswrapper[4734]: E1125 09:54:33.250579 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:54:48 crc kubenswrapper[4734]: I1125 09:54:48.247280 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:54:48 crc kubenswrapper[4734]: E1125 09:54:48.248106 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:55:00 crc kubenswrapper[4734]: I1125 09:55:00.251818 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:55:00 crc kubenswrapper[4734]: E1125 09:55:00.254486 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:55:01 crc kubenswrapper[4734]: I1125 09:55:01.341960 4734 scope.go:117] "RemoveContainer" containerID="eebb77b50be36935e18d60033a275119e3727c421e585d5c319b5eaadc424c13" 
Nov 25 09:55:01 crc kubenswrapper[4734]: I1125 09:55:01.387935 4734 scope.go:117] "RemoveContainer" containerID="ce45562aeb357c41614baf1b0c4278ccf57da7fd84deb334076d6978c5f6b348" Nov 25 09:55:01 crc kubenswrapper[4734]: I1125 09:55:01.403588 4734 scope.go:117] "RemoveContainer" containerID="008686f232377773a367a2e6083e9a4d2d5076bfad0694013ffd795dbd5af98c" Nov 25 09:55:01 crc kubenswrapper[4734]: I1125 09:55:01.448133 4734 scope.go:117] "RemoveContainer" containerID="40ed7dd79ced8b6e86bbc1ea93dc314d6703467b73066fa8acbd2386f8d03846" Nov 25 09:55:01 crc kubenswrapper[4734]: I1125 09:55:01.471476 4734 scope.go:117] "RemoveContainer" containerID="d0761cd256811635f37219d3ff8777c7e853b0800f1f1ddb642249dfe3096c67" Nov 25 09:55:13 crc kubenswrapper[4734]: I1125 09:55:13.247458 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:55:13 crc kubenswrapper[4734]: E1125 09:55:13.248726 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:55:27 crc kubenswrapper[4734]: I1125 09:55:27.247245 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:55:27 crc kubenswrapper[4734]: E1125 09:55:27.248733 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:55:40 crc kubenswrapper[4734]: I1125 09:55:40.259840 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:55:40 crc kubenswrapper[4734]: E1125 09:55:40.260850 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:55:53 crc kubenswrapper[4734]: I1125 09:55:53.247681 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:55:53 crc kubenswrapper[4734]: E1125 09:55:53.248892 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:56:01 crc kubenswrapper[4734]: I1125 09:56:01.574477 4734 scope.go:117] "RemoveContainer" 
containerID="bfd93691b8978dd518e7dbc577103b8c0996ae2578be11c554a8270abacf957e" Nov 25 09:56:01 crc kubenswrapper[4734]: I1125 09:56:01.623877 4734 scope.go:117] "RemoveContainer" containerID="4aad815f45c5844f21d839588a7ea82c52a0606e98855e801a6f3e77906d2164" Nov 25 09:56:01 crc kubenswrapper[4734]: I1125 09:56:01.667978 4734 scope.go:117] "RemoveContainer" containerID="352f24bb79461d5435843c53905eb5371b1c4ac39477d06bf848480839b99c2b" Nov 25 09:56:01 crc kubenswrapper[4734]: I1125 09:56:01.686156 4734 scope.go:117] "RemoveContainer" containerID="97a3b4e9bd33c43ff1cd5a5bc20eea5b4b0df122159b03f723288444ed3dc2f8" Nov 25 09:56:01 crc kubenswrapper[4734]: I1125 09:56:01.711985 4734 scope.go:117] "RemoveContainer" containerID="5a2b384d11064db35b5b9fada2a649959f493296388f8f3bf76b0c3115779377" Nov 25 09:56:06 crc kubenswrapper[4734]: I1125 09:56:06.247468 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:56:06 crc kubenswrapper[4734]: E1125 09:56:06.248563 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:56:21 crc kubenswrapper[4734]: I1125 09:56:21.246537 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:56:21 crc kubenswrapper[4734]: E1125 09:56:21.247215 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:56:33 crc kubenswrapper[4734]: I1125 09:56:33.246563 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:56:33 crc kubenswrapper[4734]: E1125 09:56:33.247393 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:56:44 crc kubenswrapper[4734]: I1125 09:56:44.247184 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:56:44 crc kubenswrapper[4734]: E1125 09:56:44.247968 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:56:57 crc kubenswrapper[4734]: I1125 09:56:57.246528 4734 
scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:56:57 crc kubenswrapper[4734]: E1125 09:56:57.248274 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:57:01 crc kubenswrapper[4734]: I1125 09:57:01.804176 4734 scope.go:117] "RemoveContainer" containerID="58a58cfaa371de7b838b71ea899286d65364ca3c38ef5f2b7a0091b213e60c72" Nov 25 09:57:09 crc kubenswrapper[4734]: I1125 09:57:09.247308 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:57:09 crc kubenswrapper[4734]: E1125 09:57:09.248616 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:57:22 crc kubenswrapper[4734]: I1125 09:57:22.247058 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:57:22 crc kubenswrapper[4734]: E1125 09:57:22.247870 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:57:36 crc kubenswrapper[4734]: I1125 09:57:36.247360 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:57:36 crc kubenswrapper[4734]: E1125 09:57:36.248525 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:57:49 crc kubenswrapper[4734]: I1125 09:57:49.247377 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:57:49 crc kubenswrapper[4734]: E1125 09:57:49.248324 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:58:03 crc kubenswrapper[4734]: I1125 09:58:03.247793 4734 scope.go:117] 
"RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:58:03 crc kubenswrapper[4734]: E1125 09:58:03.249056 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:58:17 crc kubenswrapper[4734]: I1125 09:58:17.246311 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:58:17 crc kubenswrapper[4734]: E1125 09:58:17.247230 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:58:30 crc kubenswrapper[4734]: I1125 09:58:30.250991 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:58:30 crc kubenswrapper[4734]: E1125 09:58:30.251891 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:58:42 crc kubenswrapper[4734]: I1125 09:58:42.247931 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:58:42 crc kubenswrapper[4734]: E1125 09:58:42.248764 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:58:56 crc kubenswrapper[4734]: I1125 09:58:56.248117 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:58:56 crc kubenswrapper[4734]: E1125 09:58:56.249128 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:59:08 crc kubenswrapper[4734]: I1125 09:59:08.247275 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:59:08 crc kubenswrapper[4734]: E1125 09:59:08.248921 4734 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2n2f8_openshift-machine-config-operator(b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1)\"" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" Nov 25 09:59:22 crc kubenswrapper[4734]: I1125 09:59:22.247840 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 09:59:22 crc kubenswrapper[4734]: I1125 09:59:22.757045 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"6e696de2d9262946d4c3e72b1d555d014c0784bdeaeb4cf7a686232b1fb917a1"} Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.144433 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh"] Nov 25 10:00:00 crc kubenswrapper[4734]: E1125 10:00:00.145318 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.145335 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4734]: E1125 10:00:00.145345 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.145354 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4734]: E1125 10:00:00.145367 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.145379 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.145537 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a24d416-bded-48b9-9a89-67ec3c3425ef" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.146154 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.148716 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.152902 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.159443 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh"] Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.271311 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/436dd01d-984f-415f-9d39-a64d3adf6e0a-config-volume\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.271372 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cclj\" (UniqueName: \"kubernetes.io/projected/436dd01d-984f-415f-9d39-a64d3adf6e0a-kube-api-access-5cclj\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.271392 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/436dd01d-984f-415f-9d39-a64d3adf6e0a-secret-volume\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.372979 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/436dd01d-984f-415f-9d39-a64d3adf6e0a-config-volume\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.373410 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cclj\" (UniqueName: \"kubernetes.io/projected/436dd01d-984f-415f-9d39-a64d3adf6e0a-kube-api-access-5cclj\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.373450 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/436dd01d-984f-415f-9d39-a64d3adf6e0a-secret-volume\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.375227 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 
10:00:00.379246 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/436dd01d-984f-415f-9d39-a64d3adf6e0a-secret-volume\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.384722 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/436dd01d-984f-415f-9d39-a64d3adf6e0a-config-volume\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.394215 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cclj\" (UniqueName: \"kubernetes.io/projected/436dd01d-984f-415f-9d39-a64d3adf6e0a-kube-api-access-5cclj\") pod \"collect-profiles-29401080-fqjxh\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.468126 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.476290 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:00 crc kubenswrapper[4734]: I1125 10:00:00.679500 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh"] Nov 25 10:00:01 crc kubenswrapper[4734]: I1125 10:00:01.082553 4734 generic.go:334] "Generic (PLEG): container finished" podID="436dd01d-984f-415f-9d39-a64d3adf6e0a" containerID="7dfff4d6b720adaebfd4eb6eebea16460b2e3546242f350429529d3f9400c4c7" exitCode=0 Nov 25 10:00:01 crc kubenswrapper[4734]: I1125 10:00:01.082610 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" event={"ID":"436dd01d-984f-415f-9d39-a64d3adf6e0a","Type":"ContainerDied","Data":"7dfff4d6b720adaebfd4eb6eebea16460b2e3546242f350429529d3f9400c4c7"} Nov 25 10:00:01 crc kubenswrapper[4734]: I1125 10:00:01.082683 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" event={"ID":"436dd01d-984f-415f-9d39-a64d3adf6e0a","Type":"ContainerStarted","Data":"10a9a761c396e2baa0ce0f29ad0cf9602504ca302c078152c998f70b12a983e2"} Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.338691 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.403510 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cclj\" (UniqueName: \"kubernetes.io/projected/436dd01d-984f-415f-9d39-a64d3adf6e0a-kube-api-access-5cclj\") pod \"436dd01d-984f-415f-9d39-a64d3adf6e0a\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.403626 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/436dd01d-984f-415f-9d39-a64d3adf6e0a-secret-volume\") pod \"436dd01d-984f-415f-9d39-a64d3adf6e0a\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.403673 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/436dd01d-984f-415f-9d39-a64d3adf6e0a-config-volume\") pod \"436dd01d-984f-415f-9d39-a64d3adf6e0a\" (UID: \"436dd01d-984f-415f-9d39-a64d3adf6e0a\") " Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.404385 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/436dd01d-984f-415f-9d39-a64d3adf6e0a-config-volume" (OuterVolumeSpecName: "config-volume") pod "436dd01d-984f-415f-9d39-a64d3adf6e0a" (UID: "436dd01d-984f-415f-9d39-a64d3adf6e0a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.408525 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/436dd01d-984f-415f-9d39-a64d3adf6e0a-kube-api-access-5cclj" (OuterVolumeSpecName: "kube-api-access-5cclj") pod "436dd01d-984f-415f-9d39-a64d3adf6e0a" (UID: "436dd01d-984f-415f-9d39-a64d3adf6e0a"). InnerVolumeSpecName "kube-api-access-5cclj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.408947 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/436dd01d-984f-415f-9d39-a64d3adf6e0a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "436dd01d-984f-415f-9d39-a64d3adf6e0a" (UID: "436dd01d-984f-415f-9d39-a64d3adf6e0a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.504759 4734 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/436dd01d-984f-415f-9d39-a64d3adf6e0a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.505186 4734 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/436dd01d-984f-415f-9d39-a64d3adf6e0a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:02 crc kubenswrapper[4734]: I1125 10:00:02.505311 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cclj\" (UniqueName: \"kubernetes.io/projected/436dd01d-984f-415f-9d39-a64d3adf6e0a-kube-api-access-5cclj\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4734]: I1125 10:00:03.095951 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" event={"ID":"436dd01d-984f-415f-9d39-a64d3adf6e0a","Type":"ContainerDied","Data":"10a9a761c396e2baa0ce0f29ad0cf9602504ca302c078152c998f70b12a983e2"} Nov 25 10:00:03 crc kubenswrapper[4734]: I1125 10:00:03.096042 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10a9a761c396e2baa0ce0f29ad0cf9602504ca302c078152c998f70b12a983e2" Nov 25 10:00:03 crc kubenswrapper[4734]: I1125 10:00:03.095988 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-fqjxh" Nov 25 10:00:53 crc kubenswrapper[4734]: I1125 10:00:53.045158 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkh4p"] Nov 25 10:00:53 crc kubenswrapper[4734]: I1125 10:00:53.050380 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j"] Nov 25 10:00:53 crc kubenswrapper[4734]: I1125 10:00:53.055191 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-create-mkh4p"] Nov 25 10:00:53 crc kubenswrapper[4734]: I1125 10:00:53.059508 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-77ea-account-create-update-46t5j"] Nov 25 10:00:54 crc kubenswrapper[4734]: I1125 10:00:54.261584 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dcdddcc-8bac-4aae-acfa-8f10f49b98d4" path="/var/lib/kubelet/pods/5dcdddcc-8bac-4aae-acfa-8f10f49b98d4/volumes" Nov 25 10:00:54 crc kubenswrapper[4734]: I1125 10:00:54.262692 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8e99e93-5f83-4568-a186-f712828e67b7" path="/var/lib/kubelet/pods/e8e99e93-5f83-4568-a186-f712828e67b7/volumes" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.023688 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-fvjv8"] Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.029864 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-db-sync-fvjv8"] Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.137040 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone-cron-29401081-zg5rm"] Nov 25 10:01:00 crc kubenswrapper[4734]: E1125 10:01:00.137373 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="436dd01d-984f-415f-9d39-a64d3adf6e0a" 
containerName="collect-profiles" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.137398 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="436dd01d-984f-415f-9d39-a64d3adf6e0a" containerName="collect-profiles" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.137540 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="436dd01d-984f-415f-9d39-a64d3adf6e0a" containerName="collect-profiles" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.138062 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.145327 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-cron-29401081-zg5rm"] Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.215027 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxmvk\" (UniqueName: \"kubernetes.io/projected/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-kube-api-access-kxmvk\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.215114 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-config-data\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.215185 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-fernet-keys\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.255048 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9be5a607-1a91-43aa-953f-bb0950d5a9c2" path="/var/lib/kubelet/pods/9be5a607-1a91-43aa-953f-bb0950d5a9c2/volumes" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.316499 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-fernet-keys\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.316610 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxmvk\" (UniqueName: \"kubernetes.io/projected/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-kube-api-access-kxmvk\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.316637 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-config-data\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 
10:01:00.323039 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-config-data\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.323200 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-fernet-keys\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.334953 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxmvk\" (UniqueName: \"kubernetes.io/projected/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-kube-api-access-kxmvk\") pod \"keystone-cron-29401081-zg5rm\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.452653 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:00 crc kubenswrapper[4734]: I1125 10:01:00.858954 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone-cron-29401081-zg5rm"] Nov 25 10:01:01 crc kubenswrapper[4734]: I1125 10:01:01.518857 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" event={"ID":"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533","Type":"ContainerStarted","Data":"8a0e24d13558873702068441d668f30e401a473723997e70681ca5ace8197b23"} Nov 25 10:01:01 crc kubenswrapper[4734]: I1125 10:01:01.518909 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" event={"ID":"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533","Type":"ContainerStarted","Data":"72a6f889ca13eb2f41992dfd0011de8436d3e318f771a17dba519c6c7582c52e"} Nov 25 10:01:01 crc kubenswrapper[4734]: I1125 10:01:01.542268 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" podStartSLOduration=1.54223841 podStartE2EDuration="1.54223841s" podCreationTimestamp="2025-11-25 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:01.537664537 +0000 UTC m=+1984.348126531" watchObservedRunningTime="2025-11-25 10:01:01.54223841 +0000 UTC m=+1984.352700394" Nov 25 10:01:01 crc kubenswrapper[4734]: I1125 10:01:01.893252 4734 scope.go:117] "RemoveContainer" containerID="daa34881bab8b0efc2e1c69491aa5760bf9ca150c399fc27584bdd3a9e21c50f" Nov 25 10:01:01 crc kubenswrapper[4734]: I1125 10:01:01.924182 4734 scope.go:117] "RemoveContainer" containerID="6234346b37ff6e67d0a506db2a89361b6c50d855bf5d59781f9b93f83d6073a7" Nov 25 10:01:01 crc kubenswrapper[4734]: I1125 10:01:01.943965 4734 scope.go:117] "RemoveContainer" containerID="b5449337a056801c1c4a5baf1473365cf8f4363b8b63ac465c4c86a992f58728" Nov 25 10:01:03 crc kubenswrapper[4734]: I1125 10:01:03.533437 4734 generic.go:334] "Generic (PLEG): container finished" podID="d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" containerID="8a0e24d13558873702068441d668f30e401a473723997e70681ca5ace8197b23" exitCode=0 Nov 25 10:01:03 crc 
kubenswrapper[4734]: I1125 10:01:03.533526 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" event={"ID":"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533","Type":"ContainerDied","Data":"8a0e24d13558873702068441d668f30e401a473723997e70681ca5ace8197b23"} Nov 25 10:01:04 crc kubenswrapper[4734]: I1125 10:01:04.874635 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:04 crc kubenswrapper[4734]: I1125 10:01:04.993848 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-config-data\") pod \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " Nov 25 10:01:04 crc kubenswrapper[4734]: I1125 10:01:04.994005 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxmvk\" (UniqueName: \"kubernetes.io/projected/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-kube-api-access-kxmvk\") pod \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " Nov 25 10:01:04 crc kubenswrapper[4734]: I1125 10:01:04.994043 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-fernet-keys\") pod \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\" (UID: \"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533\") " Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.002178 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-kube-api-access-kxmvk" (OuterVolumeSpecName: "kube-api-access-kxmvk") pod "d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" (UID: "d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533"). InnerVolumeSpecName "kube-api-access-kxmvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.003116 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" (UID: "d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.045649 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-config-data" (OuterVolumeSpecName: "config-data") pod "d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" (UID: "d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533"). InnerVolumeSpecName "config-data". 
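Each teardown in this window ends with the same housekeeping step: after the unmounts succeed and the volumes are reported detached, a later pass logs "Cleaned up orphaned pod volumes dir" for /var/lib/kubelet/pods/<podUID>/volumes. Here is a read-only sketch of inspecting that layout on the node; the path structure is taken from the log lines themselves, and this is an illustration, not the kubelet's own cleanup code:

// orphans.go - list per-pod volume directories in the layout the
// "Cleaned up orphaned pod volumes dir" messages refer to.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	root := "/var/lib/kubelet/pods"
	entries, err := os.ReadDir(root)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, e := range entries {
		vols := filepath.Join(root, e.Name(), "volumes")
		if info, err := os.Stat(vols); err == nil && info.IsDir() {
			// A directory still present here for a pod the API no longer
			// knows about is what the kubelet calls "orphaned".
			fmt.Println(vols)
		}
	}
}

Cross-referencing the printed pod UIDs against the API (or against DELETE/REMOVE entries like the ones above) is a cheap way to confirm the cleanup loop is keeping up.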
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.096279 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxmvk\" (UniqueName: \"kubernetes.io/projected/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-kube-api-access-kxmvk\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.096316 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.096332 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.549012 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" event={"ID":"d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533","Type":"ContainerDied","Data":"72a6f889ca13eb2f41992dfd0011de8436d3e318f771a17dba519c6c7582c52e"} Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.549060 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72a6f889ca13eb2f41992dfd0011de8436d3e318f771a17dba519c6c7582c52e" Nov 25 10:01:05 crc kubenswrapper[4734]: I1125 10:01:05.549076 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/keystone-cron-29401081-zg5rm" Nov 25 10:01:07 crc kubenswrapper[4734]: I1125 10:01:07.042609 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-942pc"] Nov 25 10:01:07 crc kubenswrapper[4734]: I1125 10:01:07.055638 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-bootstrap-942pc"] Nov 25 10:01:08 crc kubenswrapper[4734]: I1125 10:01:08.255155 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2b5a9bc-be61-416c-aa03-9a4a93b2b861" path="/var/lib/kubelet/pods/e2b5a9bc-be61-416c-aa03-9a4a93b2b861/volumes" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.661253 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 10:01:12 crc kubenswrapper[4734]: E1125 10:01:12.662559 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" containerName="keystone-cron" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.662592 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" containerName="keystone-cron" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.662940 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" containerName="keystone-cron" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.664039 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.667881 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.668297 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.675988 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.807435 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75640e54-3770-48c9-9b24-6186bfff6024-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.807554 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75640e54-3770-48c9-9b24-6186bfff6024-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.908899 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75640e54-3770-48c9-9b24-6186bfff6024-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.908994 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75640e54-3770-48c9-9b24-6186bfff6024-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.909022 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75640e54-3770-48c9-9b24-6186bfff6024-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:12 crc kubenswrapper[4734]: I1125 10:01:12.930952 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75640e54-3770-48c9-9b24-6186bfff6024-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:13 crc kubenswrapper[4734]: I1125 10:01:13.002875 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:13 crc kubenswrapper[4734]: I1125 10:01:13.197303 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 10:01:13 crc kubenswrapper[4734]: I1125 10:01:13.610825 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"75640e54-3770-48c9-9b24-6186bfff6024","Type":"ContainerStarted","Data":"4724829e47d89090e5dfb6a5a7743a07c7f21c65ae40bf9e1dc1d4d067e66331"} Nov 25 10:01:14 crc kubenswrapper[4734]: I1125 10:01:14.622385 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"75640e54-3770-48c9-9b24-6186bfff6024","Type":"ContainerStarted","Data":"3dc0c1900bfcd1bcf3ac607884f6025176462a8213c40e4f5fb7a61f66ee213a"} Nov 25 10:01:15 crc kubenswrapper[4734]: I1125 10:01:15.630055 4734 generic.go:334] "Generic (PLEG): container finished" podID="75640e54-3770-48c9-9b24-6186bfff6024" containerID="3dc0c1900bfcd1bcf3ac607884f6025176462a8213c40e4f5fb7a61f66ee213a" exitCode=0 Nov 25 10:01:15 crc kubenswrapper[4734]: I1125 10:01:15.630178 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"75640e54-3770-48c9-9b24-6186bfff6024","Type":"ContainerDied","Data":"3dc0c1900bfcd1bcf3ac607884f6025176462a8213c40e4f5fb7a61f66ee213a"} Nov 25 10:01:16 crc kubenswrapper[4734]: I1125 10:01:16.926502 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.071562 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75640e54-3770-48c9-9b24-6186bfff6024-kube-api-access\") pod \"75640e54-3770-48c9-9b24-6186bfff6024\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.071723 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75640e54-3770-48c9-9b24-6186bfff6024-kubelet-dir\") pod \"75640e54-3770-48c9-9b24-6186bfff6024\" (UID: \"75640e54-3770-48c9-9b24-6186bfff6024\") " Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.072317 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/75640e54-3770-48c9-9b24-6186bfff6024-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "75640e54-3770-48c9-9b24-6186bfff6024" (UID: "75640e54-3770-48c9-9b24-6186bfff6024"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.081604 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75640e54-3770-48c9-9b24-6186bfff6024-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "75640e54-3770-48c9-9b24-6186bfff6024" (UID: "75640e54-3770-48c9-9b24-6186bfff6024"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.173594 4734 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/75640e54-3770-48c9-9b24-6186bfff6024-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.173636 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/75640e54-3770-48c9-9b24-6186bfff6024-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.240113 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 10:01:17 crc kubenswrapper[4734]: E1125 10:01:17.240586 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75640e54-3770-48c9-9b24-6186bfff6024" containerName="pruner" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.240612 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="75640e54-3770-48c9-9b24-6186bfff6024" containerName="pruner" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.240821 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="75640e54-3770-48c9-9b24-6186bfff6024" containerName="pruner" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.241530 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.250638 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.375988 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kubelet-dir\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.376307 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kube-api-access\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.376447 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-var-lock\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.477808 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kubelet-dir\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.478200 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kube-api-access\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") 
" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.477970 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kubelet-dir\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.478293 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-var-lock\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.478479 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-var-lock\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.507253 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kube-api-access\") pod \"installer-9-crc\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.568467 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.647331 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"75640e54-3770-48c9-9b24-6186bfff6024","Type":"ContainerDied","Data":"4724829e47d89090e5dfb6a5a7743a07c7f21c65ae40bf9e1dc1d4d067e66331"} Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.647687 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4724829e47d89090e5dfb6a5a7743a07c7f21c65ae40bf9e1dc1d4d067e66331" Nov 25 10:01:17 crc kubenswrapper[4734]: I1125 10:01:17.647407 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 10:01:18 crc kubenswrapper[4734]: I1125 10:01:18.058891 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 10:01:18 crc kubenswrapper[4734]: W1125 10:01:18.072279 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod09aaf86b_7cc8_4d2c_a0be_e0b22174c258.slice/crio-762a7d4f8f08704357ee2a21363806ecbdb9c19d7846bb774cc558a7fda0d226 WatchSource:0}: Error finding container 762a7d4f8f08704357ee2a21363806ecbdb9c19d7846bb774cc558a7fda0d226: Status 404 returned error can't find the container with id 762a7d4f8f08704357ee2a21363806ecbdb9c19d7846bb774cc558a7fda0d226 Nov 25 10:01:18 crc kubenswrapper[4734]: I1125 10:01:18.657860 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09aaf86b-7cc8-4d2c-a0be-e0b22174c258","Type":"ContainerStarted","Data":"4d2a09d8f5b791bfeb42c2bc612116c998c3dd7399e2b97731f66d6954ca6c07"} Nov 25 10:01:18 crc kubenswrapper[4734]: I1125 10:01:18.658291 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09aaf86b-7cc8-4d2c-a0be-e0b22174c258","Type":"ContainerStarted","Data":"762a7d4f8f08704357ee2a21363806ecbdb9c19d7846bb774cc558a7fda0d226"} Nov 25 10:01:18 crc kubenswrapper[4734]: I1125 10:01:18.678696 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.6786719460000001 podStartE2EDuration="1.678671946s" podCreationTimestamp="2025-11-25 10:01:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:18.674819664 +0000 UTC m=+2001.485281668" watchObservedRunningTime="2025-11-25 10:01:18.678671946 +0000 UTC m=+2001.489133960" Nov 25 10:01:50 crc kubenswrapper[4734]: I1125 10:01:50.695574 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:01:50 crc kubenswrapper[4734]: I1125 10:01:50.696074 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.176901 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/openstackclient"] Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.177749 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/openstackclient" podUID="8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" containerName="openstackclient" containerID="cri-o://a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325" gracePeriod=30 Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.677954 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/openstackclient" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.806586 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7cv2\" (UniqueName: \"kubernetes.io/projected/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-kube-api-access-x7cv2\") pod \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.806755 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config\") pod \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.806818 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config-secret\") pod \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\" (UID: \"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc\") " Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.814307 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-kube-api-access-x7cv2" (OuterVolumeSpecName: "kube-api-access-x7cv2") pod "8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" (UID: "8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc"). InnerVolumeSpecName "kube-api-access-x7cv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.828406 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" (UID: "8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.837792 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" (UID: "8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.909229 4734 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.909276 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7cv2\" (UniqueName: \"kubernetes.io/projected/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-kube-api-access-x7cv2\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.909293 4734 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.998870 4734 generic.go:334] "Generic (PLEG): container finished" podID="8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" containerID="a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325" exitCode=143 Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.998998 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstackclient" event={"ID":"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc","Type":"ContainerDied","Data":"a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325"} Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.998968 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="keystone-kuttl-tests/openstackclient" Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.999060 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/openstackclient" event={"ID":"8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc","Type":"ContainerDied","Data":"12b7f2ece0f5806b2467efa7ae288bb54a054c84457f9a54de02b70acbc060bc"} Nov 25 10:01:55 crc kubenswrapper[4734]: I1125 10:01:55.999127 4734 scope.go:117] "RemoveContainer" containerID="a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.026221 4734 scope.go:117] "RemoveContainer" containerID="a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.027118 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325\": container with ID starting with a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325 not found: ID does not exist" containerID="a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.027237 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325"} err="failed to get container status \"a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325\": rpc error: code = NotFound desc = could not find container \"a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325\": container with ID starting with a34145aa6e6026cb23727779fdcc4a45d0a01c94cb1896a624a6052c1e291325 not found: ID does not exist" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.047961 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/openstackclient"] Nov 25 10:01:56 
crc kubenswrapper[4734]: I1125 10:01:56.051760 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/openstackclient"] Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.109648 4734 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-startup-monitor-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.111690 4734 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.117157 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" containerName="openstackclient" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.117206 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" containerName="openstackclient" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.119619 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" containerName="openstackclient" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.123516 4734 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.125195 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555" gracePeriod=15 Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.125571 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.126433 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e" gracePeriod=15 Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.126544 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5" gracePeriod=15 Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.126702 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5" gracePeriod=15 Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.126860 4734 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.127016 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed" gracePeriod=15 Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.127894 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.127913 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.127938 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.127954 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.127974 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.127983 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.131576 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.131627 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.132831 4734 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.132851 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.132891 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.132902 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.132924 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.132934 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.133880 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.133902 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.133926 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.133958 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.133988 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.134004 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: E1125 10:01:56.134484 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.134504 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.135098 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.265399 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc" path="/var/lib/kubelet/pods/8cbad043-b8dd-44b6-81dd-0f9bdbb7d9bc/volumes" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320047 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod 
\"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320143 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320167 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320206 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320247 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320283 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320307 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.320361 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421009 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421058 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421111 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421137 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421170 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421205 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421179 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421234 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421194 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421249 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421267 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421290 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421455 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421501 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421543 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:56 crc kubenswrapper[4734]: I1125 10:01:56.421628 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.012388 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.014550 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.015829 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e" exitCode=0 Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.015871 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5" exitCode=0 Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.015889 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed" exitCode=0 Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.015905 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5" exitCode=2 Nov 25 
10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.015962 4734 scope.go:117] "RemoveContainer" containerID="01a13e8edb2239dde4c011fd9489a5ac353d016b04ddb52509d107b8ef061999" Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.020932 4734 generic.go:334] "Generic (PLEG): container finished" podID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" containerID="4d2a09d8f5b791bfeb42c2bc612116c998c3dd7399e2b97731f66d6954ca6c07" exitCode=0 Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.020987 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09aaf86b-7cc8-4d2c-a0be-e0b22174c258","Type":"ContainerDied","Data":"4d2a09d8f5b791bfeb42c2bc612116c998c3dd7399e2b97731f66d6954ca6c07"} Nov 25 10:01:57 crc kubenswrapper[4734]: I1125 10:01:57.022029 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.029527 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.355440 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.356147 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.462926 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-var-lock\") pod \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.463034 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kube-api-access\") pod \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.463067 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kubelet-dir\") pod \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\" (UID: \"09aaf86b-7cc8-4d2c-a0be-e0b22174c258\") " Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.463097 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-var-lock" (OuterVolumeSpecName: "var-lock") pod "09aaf86b-7cc8-4d2c-a0be-e0b22174c258" (UID: "09aaf86b-7cc8-4d2c-a0be-e0b22174c258"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.463219 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "09aaf86b-7cc8-4d2c-a0be-e0b22174c258" (UID: "09aaf86b-7cc8-4d2c-a0be-e0b22174c258"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.463372 4734 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.463393 4734 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.471114 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "09aaf86b-7cc8-4d2c-a0be-e0b22174c258" (UID: "09aaf86b-7cc8-4d2c-a0be-e0b22174c258"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.564539 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09aaf86b-7cc8-4d2c-a0be-e0b22174c258-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.645199 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.645990 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.646695 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.647206 4734 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.665985 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666074 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666082 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666219 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666295 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666375 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666651 4734 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666682 4734 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:58 crc kubenswrapper[4734]: I1125 10:01:58.666699 4734 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.039925 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09aaf86b-7cc8-4d2c-a0be-e0b22174c258","Type":"ContainerDied","Data":"762a7d4f8f08704357ee2a21363806ecbdb9c19d7846bb774cc558a7fda0d226"} Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.040189 4734 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="762a7d4f8f08704357ee2a21363806ecbdb9c19d7846bb774cc558a7fda0d226" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.039994 4734 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.043867 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.044744 4734 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555" exitCode=0 Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.044856 4734 scope.go:117] "RemoveContainer" containerID="61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.045007 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.053426 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.054358 4734 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.067408 4734 scope.go:117] "RemoveContainer" containerID="99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.075883 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.076527 4734 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.087750 4734 scope.go:117] "RemoveContainer" containerID="f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.108590 4734 scope.go:117] "RemoveContainer" containerID="99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.127806 4734 scope.go:117] "RemoveContainer" containerID="dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.152243 4734 scope.go:117] "RemoveContainer" containerID="39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.190523 4734 scope.go:117] "RemoveContainer" containerID="61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e" Nov 25 10:01:59 crc kubenswrapper[4734]: E1125 10:01:59.191018 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\": container with ID starting with 61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e not found: ID does not exist" containerID="61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.191063 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e"} err="failed to get container status \"61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\": rpc error: code = NotFound desc = could not find container 
\"61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e\": container with ID starting with 61b34be6904435bedafdbe1d7c3f0f282bec99ec0c8a2c4c50880965e5822d0e not found: ID does not exist" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.191108 4734 scope.go:117] "RemoveContainer" containerID="99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5" Nov 25 10:01:59 crc kubenswrapper[4734]: E1125 10:01:59.191748 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\": container with ID starting with 99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5 not found: ID does not exist" containerID="99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.191781 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5"} err="failed to get container status \"99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\": rpc error: code = NotFound desc = could not find container \"99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5\": container with ID starting with 99c281fd1d674d417e228361bf16c6159aaede534abebd2577cf75c82de62ea5 not found: ID does not exist" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.191799 4734 scope.go:117] "RemoveContainer" containerID="f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed" Nov 25 10:01:59 crc kubenswrapper[4734]: E1125 10:01:59.192709 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\": container with ID starting with f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed not found: ID does not exist" containerID="f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.192746 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed"} err="failed to get container status \"f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\": rpc error: code = NotFound desc = could not find container \"f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed\": container with ID starting with f3cf2dbf0d6d03f92e97b70a6cdc3f1ad6a18f80033ec1a18d7e71b95fd0f5ed not found: ID does not exist" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.192764 4734 scope.go:117] "RemoveContainer" containerID="99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5" Nov 25 10:01:59 crc kubenswrapper[4734]: E1125 10:01:59.193298 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\": container with ID starting with 99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5 not found: ID does not exist" containerID="99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.193375 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5"} 
err="failed to get container status \"99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\": rpc error: code = NotFound desc = could not find container \"99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5\": container with ID starting with 99707e51c94fa77b24645a7134be29b23ae07673cd546c8559a43499b36279f5 not found: ID does not exist" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.193428 4734 scope.go:117] "RemoveContainer" containerID="dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555" Nov 25 10:01:59 crc kubenswrapper[4734]: E1125 10:01:59.194045 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\": container with ID starting with dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555 not found: ID does not exist" containerID="dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.194104 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555"} err="failed to get container status \"dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\": rpc error: code = NotFound desc = could not find container \"dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555\": container with ID starting with dd6c4c3bad3fd30ab46359e597a20dddb941ee9b3896e4a553b85121e4a4a555 not found: ID does not exist" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.194131 4734 scope.go:117] "RemoveContainer" containerID="39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626" Nov 25 10:01:59 crc kubenswrapper[4734]: E1125 10:01:59.194465 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\": container with ID starting with 39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626 not found: ID does not exist" containerID="39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626" Nov 25 10:01:59 crc kubenswrapper[4734]: I1125 10:01:59.194494 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626"} err="failed to get container status \"39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\": rpc error: code = NotFound desc = could not find container \"39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626\": container with ID starting with 39796f9b1c26e0311304c2653c83221ae7df3dbb58a7265448d1760c67d1a626 not found: ID does not exist" Nov 25 10:02:00 crc kubenswrapper[4734]: I1125 10:02:00.251226 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:00 crc kubenswrapper[4734]: I1125 10:02:00.252065 4734 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 
38.102.83.5:6443: connect: connection refused" Nov 25 10:02:00 crc kubenswrapper[4734]: I1125 10:02:00.255170 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 10:02:01 crc kubenswrapper[4734]: E1125 10:02:01.193880 4734 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.5:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:02:01 crc kubenswrapper[4734]: I1125 10:02:01.194341 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:02:01 crc kubenswrapper[4734]: E1125 10:02:01.230451 4734 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.5:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b37b30ea1eb57 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 10:02:01.229847383 +0000 UTC m=+2044.040309377,LastTimestamp:2025-11-25 10:02:01.229847383 +0000 UTC m=+2044.040309377,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 10:02:01 crc kubenswrapper[4734]: E1125 10:02:01.291380 4734 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC keystone-kuttl-tests/mysql-db-openstack-galera-1: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/keystone-kuttl-tests/persistentvolumeclaims/mysql-db-openstack-galera-1\": dial tcp 38.102.83.5:6443: connect: connection refused" pod="keystone-kuttl-tests/openstack-galera-1" volumeName="mysql-db" Nov 25 10:02:02 crc kubenswrapper[4734]: I1125 10:02:02.008862 4734 scope.go:117] "RemoveContainer" containerID="f3e2e7ae79c4fad99eab62265ca9b4e046eb50f8d2563212be819e484f8e2bc7" Nov 25 10:02:02 crc kubenswrapper[4734]: I1125 10:02:02.075116 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc"} Nov 25 10:02:02 crc kubenswrapper[4734]: I1125 10:02:02.075174 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"546b911ef13aca0a96f668e75bfbc3b56f60a003abc85fa5fdac84e17da9381c"} Nov 25 10:02:02 crc kubenswrapper[4734]: E1125 10:02:02.075825 4734 kubelet.go:1929] "Failed creating a mirror pod for" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.5:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:02:02 crc kubenswrapper[4734]: I1125 10:02:02.075952 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.457182 4734 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.457713 4734 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.457929 4734 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.458109 4734 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.458296 4734 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:05 crc kubenswrapper[4734]: I1125 10:02:05.458318 4734 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.458526 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="200ms" Nov 25 10:02:05 crc kubenswrapper[4734]: E1125 10:02:05.659738 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="400ms" Nov 25 10:02:06 crc kubenswrapper[4734]: E1125 10:02:06.060281 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="800ms" Nov 25 10:02:06 crc kubenswrapper[4734]: E1125 10:02:06.861264 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" 
interval="1.6s" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.126035 4734 generic.go:334] "Generic (PLEG): container finished" podID="eeac7687-6578-41aa-99da-c576d6162d9e" containerID="437746ac5415afbb4539bd1637931458dd0a4102e3630423816351a3ff378f85" exitCode=1 Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.126065 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" event={"ID":"eeac7687-6578-41aa-99da-c576d6162d9e","Type":"ContainerDied","Data":"437746ac5415afbb4539bd1637931458dd0a4102e3630423816351a3ff378f85"} Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.126645 4734 scope.go:117] "RemoveContainer" containerID="437746ac5415afbb4539bd1637931458dd0a4102e3630423816351a3ff378f85" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.126796 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.127266 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.246477 4734 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.247734 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.247963 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.275286 4734 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.275317 4734 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:07 crc kubenswrapper[4734]: E1125 10:02:07.275820 4734 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.276586 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:07 crc kubenswrapper[4734]: W1125 10:02:07.300135 4734 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-9c106a78c5ee29a751d3cbbd89ff963f50a18a094e5df16defb1ce549d8cb9dd WatchSource:0}: Error finding container 9c106a78c5ee29a751d3cbbd89ff963f50a18a094e5df16defb1ce549d8cb9dd: Status 404 returned error can't find the container with id 9c106a78c5ee29a751d3cbbd89ff963f50a18a094e5df16defb1ce549d8cb9dd Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.492201 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.975573 4734 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.53:8081/readyz\": dial tcp 10.217.0.53:8081: connect: connection refused" Nov 25 10:02:07 crc kubenswrapper[4734]: I1125 10:02:07.975628 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.53:8081/healthz\": dial tcp 10.217.0.53:8081: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: E1125 10:02:08.086611 4734 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.5:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b37b30ea1eb57 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 10:02:01.229847383 +0000 UTC m=+2044.040309377,LastTimestamp:2025-11-25 10:02:01.229847383 +0000 UTC m=+2044.040309377,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.137228 4734 generic.go:334] "Generic (PLEG): container finished" podID="eeac7687-6578-41aa-99da-c576d6162d9e" containerID="f34b64d81faf9bb122db3a9a879fb3859c937a4d1f0890de89ccae859bc57ee4" exitCode=1 Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.137368 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" event={"ID":"eeac7687-6578-41aa-99da-c576d6162d9e","Type":"ContainerDied","Data":"f34b64d81faf9bb122db3a9a879fb3859c937a4d1f0890de89ccae859bc57ee4"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.137432 4734 scope.go:117] "RemoveContainer" 
containerID="437746ac5415afbb4539bd1637931458dd0a4102e3630423816351a3ff378f85" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.137985 4734 scope.go:117] "RemoveContainer" containerID="f34b64d81faf9bb122db3a9a879fb3859c937a4d1f0890de89ccae859bc57ee4" Nov 25 10:02:08 crc kubenswrapper[4734]: E1125 10:02:08.138677 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-59fbfdbcd7-cvvvx_openstack-operators(eeac7687-6578-41aa-99da-c576d6162d9e)\"" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.138687 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.139308 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.140867 4734 generic.go:334] "Generic (PLEG): container finished" podID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" containerID="a35b7abc65ef7865dc9477490333d7276d0cf7ccdd883315fe1776db28642be6" exitCode=1 Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.141000 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" event={"ID":"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6","Type":"ContainerDied","Data":"a35b7abc65ef7865dc9477490333d7276d0cf7ccdd883315fe1776db28642be6"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.141740 4734 scope.go:117] "RemoveContainer" containerID="a35b7abc65ef7865dc9477490333d7276d0cf7ccdd883315fe1776db28642be6" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.142895 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.143245 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.143719 4734 status_manager.go:851] "Failed to get status for pod" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-664f78b8b-n7g4f\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.144870 4734 generic.go:334] "Generic (PLEG): container finished" podID="d552db43-2924-4931-a15d-f8803531210f" containerID="9f7dc98b5bceffa7f3567dc1c51ef65556d04b72bb95638f87897d1f47192b6a" exitCode=1 Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.144984 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" event={"ID":"d552db43-2924-4931-a15d-f8803531210f","Type":"ContainerDied","Data":"9f7dc98b5bceffa7f3567dc1c51ef65556d04b72bb95638f87897d1f47192b6a"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.145655 4734 scope.go:117] "RemoveContainer" containerID="9f7dc98b5bceffa7f3567dc1c51ef65556d04b72bb95638f87897d1f47192b6a" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.146165 4734 status_manager.go:851] "Failed to get status for pod" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-664f78b8b-n7g4f\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.146640 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.147213 4734 status_manager.go:851] "Failed to get status for pod" podUID="d552db43-2924-4931-a15d-f8803531210f" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/rabbitmq-cluster-operator-779fc9694b-7tv4k\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.147682 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.149153 4734 generic.go:334] "Generic (PLEG): container finished" podID="ae285841-6883-40fd-aa4c-dc13e1afdf95" containerID="cf51382140cc95b0204a58895b7628e8d351e46c4718b09c604ff064c71bcd79" exitCode=1 Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.149250 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerDied","Data":"cf51382140cc95b0204a58895b7628e8d351e46c4718b09c604ff064c71bcd79"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.149778 4734 scope.go:117] "RemoveContainer" containerID="cf51382140cc95b0204a58895b7628e8d351e46c4718b09c604ff064c71bcd79" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.150145 4734 status_manager.go:851] "Failed to get status for pod" 
podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/mariadb-operator-controller-manager-7d8c8fd467-kkqb8\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.151159 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.151561 4734 status_manager.go:851] "Failed to get status for pod" podUID="d552db43-2924-4931-a15d-f8803531210f" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/rabbitmq-cluster-operator-779fc9694b-7tv4k\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.151845 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.152208 4734 status_manager.go:851] "Failed to get status for pod" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-664f78b8b-n7g4f\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.154745 4734 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="1613fcb298c822b752c487987371f2bf050a42878bf4d9d71e9fc19f50562e6d" exitCode=0 Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.154823 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"1613fcb298c822b752c487987371f2bf050a42878bf4d9d71e9fc19f50562e6d"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.154863 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"9c106a78c5ee29a751d3cbbd89ff963f50a18a094e5df16defb1ce549d8cb9dd"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.155118 4734 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.155140 4734 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.155472 4734 status_manager.go:851] "Failed to get status for pod" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" 
pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/mariadb-operator-controller-manager-7d8c8fd467-kkqb8\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: E1125 10:02:08.155492 4734 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.155999 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.156668 4734 status_manager.go:851] "Failed to get status for pod" podUID="d552db43-2924-4931-a15d-f8803531210f" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/rabbitmq-cluster-operator-779fc9694b-7tv4k\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.157322 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.157630 4734 generic.go:334] "Generic (PLEG): container finished" podID="842336e7-3fca-4ce9-b030-735f9fa84367" containerID="4aebda91f0ccf0a9298c9c33f02a6c93fbeeaa226d2fb90d9f62c649feba01fd" exitCode=1 Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.157662 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerDied","Data":"4aebda91f0ccf0a9298c9c33f02a6c93fbeeaa226d2fb90d9f62c649feba01fd"} Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.157633 4734 status_manager.go:851] "Failed to get status for pod" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-664f78b8b-n7g4f\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.158062 4734 scope.go:117] "RemoveContainer" containerID="4aebda91f0ccf0a9298c9c33f02a6c93fbeeaa226d2fb90d9f62c649feba01fd" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.158440 4734 status_manager.go:851] "Failed to get status for pod" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.159232 4734 
status_manager.go:851] "Failed to get status for pod" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-664f78b8b-n7g4f\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.160759 4734 status_manager.go:851] "Failed to get status for pod" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/mariadb-operator-controller-manager-7d8c8fd467-kkqb8\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.161555 4734 status_manager.go:851] "Failed to get status for pod" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/keystone-operator-controller-manager-59fbfdbcd7-cvvvx\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.162117 4734 status_manager.go:851] "Failed to get status for pod" podUID="842336e7-3fca-4ce9-b030-735f9fa84367" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/infra-operator-controller-manager-68875f9666-bkfdd\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.162450 4734 status_manager.go:851] "Failed to get status for pod" podUID="d552db43-2924-4931-a15d-f8803531210f" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/rabbitmq-cluster-operator-779fc9694b-7tv4k\": dial tcp 38.102.83.5:6443: connect: connection refused" Nov 25 10:02:08 crc kubenswrapper[4734]: I1125 10:02:08.203994 4734 scope.go:117] "RemoveContainer" containerID="cb4bfa871f9f584476931d0e330c8b0d41e81993189fdcea0d71d5ed616d55dd" Nov 25 10:02:08 crc kubenswrapper[4734]: E1125 10:02:08.462127 4734 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.5:6443: connect: connection refused" interval="3.2s" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.170607 4734 generic.go:334] "Generic (PLEG): container finished" podID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" containerID="01b7f6f3fedaa6d39c4109f6c117ed74ff53a21123a94dbe9c581a9fa405da83" exitCode=1 Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.170706 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" event={"ID":"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6","Type":"ContainerDied","Data":"01b7f6f3fedaa6d39c4109f6c117ed74ff53a21123a94dbe9c581a9fa405da83"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.170929 4734 scope.go:117] "RemoveContainer" containerID="a35b7abc65ef7865dc9477490333d7276d0cf7ccdd883315fe1776db28642be6" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.171398 4734 scope.go:117] "RemoveContainer" 
containerID="01b7f6f3fedaa6d39c4109f6c117ed74ff53a21123a94dbe9c581a9fa405da83" Nov 25 10:02:09 crc kubenswrapper[4734]: E1125 10:02:09.171632 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-664f78b8b-n7g4f_metallb-system(e96f81f5-7d7b-4580-9174-f5b46d5f1ea6)\"" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.173297 4734 generic.go:334] "Generic (PLEG): container finished" podID="d552db43-2924-4931-a15d-f8803531210f" containerID="f5e3a2fc81e7dc7ea87913f8ffa2dd3dc1f7b9b82dfb7d2072d7ba03ff56b862" exitCode=1 Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.173374 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" event={"ID":"d552db43-2924-4931-a15d-f8803531210f","Type":"ContainerDied","Data":"f5e3a2fc81e7dc7ea87913f8ffa2dd3dc1f7b9b82dfb7d2072d7ba03ff56b862"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.173928 4734 scope.go:117] "RemoveContainer" containerID="f5e3a2fc81e7dc7ea87913f8ffa2dd3dc1f7b9b82dfb7d2072d7ba03ff56b862" Nov 25 10:02:09 crc kubenswrapper[4734]: E1125 10:02:09.174140 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-779fc9694b-7tv4k_openstack-operators(d552db43-2924-4931-a15d-f8803531210f)\"" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" podUID="d552db43-2924-4931-a15d-f8803531210f" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.179155 4734 generic.go:334] "Generic (PLEG): container finished" podID="ae285841-6883-40fd-aa4c-dc13e1afdf95" containerID="58bf4e10964a479715d8b033d8f859d7cc356d27ef369170a883af2107d46c45" exitCode=1 Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.179253 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerDied","Data":"58bf4e10964a479715d8b033d8f859d7cc356d27ef369170a883af2107d46c45"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.179868 4734 scope.go:117] "RemoveContainer" containerID="58bf4e10964a479715d8b033d8f859d7cc356d27ef369170a883af2107d46c45" Nov 25 10:02:09 crc kubenswrapper[4734]: E1125 10:02:09.180096 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-7d8c8fd467-kkqb8_openstack-operators(ae285841-6883-40fd-aa4c-dc13e1afdf95)\"" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.184358 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7f5dffb0b79e3a0a53b35d43afb8a00fd9b79ba471f845a5eb50274a927520cc"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.184396 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e62d99288f7a9210cc50d3803bd54f97c312b58c992b7952d919715683c76205"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.184406 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2e7d060c2442cecbb1a26d1dd0a375770f76123fa26a1da8067e58da00bc242d"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.184414 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fcdf4c960e6c34a9399ee20db3877e191def63843cab051fbceb918f215e0357"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.190856 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.190901 4734 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa" exitCode=1 Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.191021 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.191674 4734 scope.go:117] "RemoveContainer" containerID="3ce031ad35af3b8b579fff44802bb8ddc5cd0445ce7942c0470811a9b537aefa" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.194421 4734 generic.go:334] "Generic (PLEG): container finished" podID="842336e7-3fca-4ce9-b030-735f9fa84367" containerID="1a5ce3581772e8f9b0576548b49ac4fb1ccda9a4c6ab82c37a09d641b6fa1ab0" exitCode=1 Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.194476 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerDied","Data":"1a5ce3581772e8f9b0576548b49ac4fb1ccda9a4c6ab82c37a09d641b6fa1ab0"} Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.195236 4734 scope.go:117] "RemoveContainer" containerID="1a5ce3581772e8f9b0576548b49ac4fb1ccda9a4c6ab82c37a09d641b6fa1ab0" Nov 25 10:02:09 crc kubenswrapper[4734]: E1125 10:02:09.195472 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-68875f9666-bkfdd_openstack-operators(842336e7-3fca-4ce9-b030-735f9fa84367)\"" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.203069 4734 scope.go:117] "RemoveContainer" containerID="f34b64d81faf9bb122db3a9a879fb3859c937a4d1f0890de89ccae859bc57ee4" Nov 25 10:02:09 crc kubenswrapper[4734]: E1125 10:02:09.203357 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=keystone-operator-controller-manager-59fbfdbcd7-cvvvx_openstack-operators(eeac7687-6578-41aa-99da-c576d6162d9e)\"" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.285550 4734 scope.go:117] "RemoveContainer" containerID="9f7dc98b5bceffa7f3567dc1c51ef65556d04b72bb95638f87897d1f47192b6a" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.355689 4734 scope.go:117] "RemoveContainer" containerID="cf51382140cc95b0204a58895b7628e8d351e46c4718b09c604ff064c71bcd79" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.405573 4734 scope.go:117] "RemoveContainer" containerID="4aebda91f0ccf0a9298c9c33f02a6c93fbeeaa226d2fb90d9f62c649feba01fd" Nov 25 10:02:09 crc kubenswrapper[4734]: I1125 10:02:09.622888 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.211226 4734 scope.go:117] "RemoveContainer" containerID="58bf4e10964a479715d8b033d8f859d7cc356d27ef369170a883af2107d46c45" Nov 25 10:02:10 crc kubenswrapper[4734]: E1125 10:02:10.213031 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-7d8c8fd467-kkqb8_openstack-operators(ae285841-6883-40fd-aa4c-dc13e1afdf95)\"" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.216069 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.216188 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"dd776641e0afd8c57d25a62312579cf42d05f54840b18d2bec9dea5a2d9e4a4a"} Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.220309 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"af18f0a6a37e2b5fa70db4798eb12dd4619580d0f4daf58e5aac8c190930f522"} Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.220481 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.220632 4734 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:10 crc kubenswrapper[4734]: I1125 10:02:10.220672 4734 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:12 crc kubenswrapper[4734]: I1125 10:02:12.211275 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 10:02:12 crc kubenswrapper[4734]: I1125 10:02:12.277362 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 
10:02:12 crc kubenswrapper[4734]: I1125 10:02:12.277454 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:12 crc kubenswrapper[4734]: I1125 10:02:12.283981 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:13 crc kubenswrapper[4734]: I1125 10:02:13.022294 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 10:02:13 crc kubenswrapper[4734]: I1125 10:02:13.022886 4734 scope.go:117] "RemoveContainer" containerID="01b7f6f3fedaa6d39c4109f6c117ed74ff53a21123a94dbe9c581a9fa405da83" Nov 25 10:02:13 crc kubenswrapper[4734]: E1125 10:02:13.023125 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-664f78b8b-n7g4f_metallb-system(e96f81f5-7d7b-4580-9174-f5b46d5f1ea6)\"" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" Nov 25 10:02:15 crc kubenswrapper[4734]: I1125 10:02:15.236385 4734 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:15 crc kubenswrapper[4734]: I1125 10:02:15.352565 4734 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9065715d-8346-4e64-890e-4ae3e7cb74ff" Nov 25 10:02:16 crc kubenswrapper[4734]: I1125 10:02:16.262246 4734 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:16 crc kubenswrapper[4734]: I1125 10:02:16.262279 4734 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:16 crc kubenswrapper[4734]: I1125 10:02:16.265764 4734 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="9065715d-8346-4e64-890e-4ae3e7cb74ff" Nov 25 10:02:16 crc kubenswrapper[4734]: I1125 10:02:16.266491 4734 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://fcdf4c960e6c34a9399ee20db3877e191def63843cab051fbceb918f215e0357" Nov 25 10:02:16 crc kubenswrapper[4734]: I1125 10:02:16.266523 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.271445 4734 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.271833 4734 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="6f695112-1be5-4918-9796-79c5f6cf7855" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.274868 4734 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" 
podUID="9065715d-8346-4e64-890e-4ae3e7cb74ff" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.492676 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.492775 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.493648 4734 scope.go:117] "RemoveContainer" containerID="f34b64d81faf9bb122db3a9a879fb3859c937a4d1f0890de89ccae859bc57ee4" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.975756 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 10:02:17 crc kubenswrapper[4734]: I1125 10:02:17.976834 4734 scope.go:117] "RemoveContainer" containerID="1a5ce3581772e8f9b0576548b49ac4fb1ccda9a4c6ab82c37a09d641b6fa1ab0" Nov 25 10:02:17 crc kubenswrapper[4734]: E1125 10:02:17.977171 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-68875f9666-bkfdd_openstack-operators(842336e7-3fca-4ce9-b030-735f9fa84367)\"" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367" Nov 25 10:02:18 crc kubenswrapper[4734]: I1125 10:02:18.282670 4734 generic.go:334] "Generic (PLEG): container finished" podID="eeac7687-6578-41aa-99da-c576d6162d9e" containerID="f8ecdd20cc2f76d3611515721cf6a7ee9ac2d0e6f5554dd3b882b8027be70878" exitCode=1 Nov 25 10:02:18 crc kubenswrapper[4734]: I1125 10:02:18.282746 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" event={"ID":"eeac7687-6578-41aa-99da-c576d6162d9e","Type":"ContainerDied","Data":"f8ecdd20cc2f76d3611515721cf6a7ee9ac2d0e6f5554dd3b882b8027be70878"} Nov 25 10:02:18 crc kubenswrapper[4734]: I1125 10:02:18.282797 4734 scope.go:117] "RemoveContainer" containerID="f34b64d81faf9bb122db3a9a879fb3859c937a4d1f0890de89ccae859bc57ee4" Nov 25 10:02:18 crc kubenswrapper[4734]: I1125 10:02:18.283652 4734 scope.go:117] "RemoveContainer" containerID="f8ecdd20cc2f76d3611515721cf6a7ee9ac2d0e6f5554dd3b882b8027be70878" Nov 25 10:02:18 crc kubenswrapper[4734]: E1125 10:02:18.284387 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-59fbfdbcd7-cvvvx_openstack-operators(eeac7687-6578-41aa-99da-c576d6162d9e)\"" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" podUID="eeac7687-6578-41aa-99da-c576d6162d9e" Nov 25 10:02:18 crc kubenswrapper[4734]: I1125 10:02:18.758777 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 10:02:18 crc kubenswrapper[4734]: I1125 10:02:18.766447 4734 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 10:02:19 crc kubenswrapper[4734]: I1125 10:02:19.295419 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 10:02:19 crc kubenswrapper[4734]: I1125 10:02:19.623024 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 10:02:19 crc kubenswrapper[4734]: I1125 10:02:19.623956 4734 scope.go:117] "RemoveContainer" containerID="58bf4e10964a479715d8b033d8f859d7cc356d27ef369170a883af2107d46c45" Nov 25 10:02:20 crc kubenswrapper[4734]: I1125 10:02:20.300937 4734 generic.go:334] "Generic (PLEG): container finished" podID="ae285841-6883-40fd-aa4c-dc13e1afdf95" containerID="9198371358e7a1975cac01d502d0eb4adafdbbec7a103c854c3e31279f409cb1" exitCode=1 Nov 25 10:02:20 crc kubenswrapper[4734]: I1125 10:02:20.301011 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerDied","Data":"9198371358e7a1975cac01d502d0eb4adafdbbec7a103c854c3e31279f409cb1"} Nov 25 10:02:20 crc kubenswrapper[4734]: I1125 10:02:20.301458 4734 scope.go:117] "RemoveContainer" containerID="58bf4e10964a479715d8b033d8f859d7cc356d27ef369170a883af2107d46c45" Nov 25 10:02:20 crc kubenswrapper[4734]: I1125 10:02:20.302368 4734 scope.go:117] "RemoveContainer" containerID="9198371358e7a1975cac01d502d0eb4adafdbbec7a103c854c3e31279f409cb1" Nov 25 10:02:20 crc kubenswrapper[4734]: E1125 10:02:20.302574 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-7d8c8fd467-kkqb8_openstack-operators(ae285841-6883-40fd-aa4c-dc13e1afdf95)\"" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" Nov 25 10:02:20 crc kubenswrapper[4734]: I1125 10:02:20.695948 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:20 crc kubenswrapper[4734]: I1125 10:02:20.696040 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:02:21 crc kubenswrapper[4734]: I1125 10:02:21.247506 4734 scope.go:117] "RemoveContainer" containerID="f5e3a2fc81e7dc7ea87913f8ffa2dd3dc1f7b9b82dfb7d2072d7ba03ff56b862" Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.325154 4734 generic.go:334] "Generic (PLEG): container finished" podID="d552db43-2924-4931-a15d-f8803531210f" containerID="0d9e1a87b0142f711b37d0935ba9c53c79321467effb62304c2930ce6090a41a" exitCode=1 Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.325200 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" event={"ID":"d552db43-2924-4931-a15d-f8803531210f","Type":"ContainerDied","Data":"0d9e1a87b0142f711b37d0935ba9c53c79321467effb62304c2930ce6090a41a"} Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.325236 4734 scope.go:117] "RemoveContainer" 
containerID="f5e3a2fc81e7dc7ea87913f8ffa2dd3dc1f7b9b82dfb7d2072d7ba03ff56b862" Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.325848 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.325916 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:22.825894024 +0000 UTC m=+2065.636356028 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.326003 4734 scope.go:117] "RemoveContainer" containerID="0d9e1a87b0142f711b37d0935ba9c53c79321467effb62304c2930ce6090a41a" Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326188 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326264 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326301 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-779fc9694b-7tv4k_openstack-operators(d552db43-2924-4931-a15d-f8803531210f)\"" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" podUID="d552db43-2924-4931-a15d-f8803531210f" Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326361 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:22.826302806 +0000 UTC m=+2065.636764910 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-config-data" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326417 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:22.826394839 +0000 UTC m=+2065.636857103 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-scripts" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326431 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.326539 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:22.826519113 +0000 UTC m=+2065.636981137 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.417009 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.587656 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.599082 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.780070 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.831837 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.832233 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:23.832211302 +0000 UTC m=+2066.642673306 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.831875 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.832451 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:23.832440419 +0000 UTC m=+2066.642902423 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-scripts" not found
Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.831867 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.832620 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:23.832609914 +0000 UTC m=+2066.643071918 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.832001 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found
Nov 25 10:02:22 crc kubenswrapper[4734]: E1125 10:02:22.833896 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:23.833882041 +0000 UTC m=+2066.644344045 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-config-data" not found
Nov 25 10:02:22 crc kubenswrapper[4734]: I1125 10:02:22.921574 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 25 10:02:23 crc kubenswrapper[4734]: I1125 10:02:23.209536 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 25 10:02:23 crc kubenswrapper[4734]: I1125 10:02:23.323177 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 25 10:02:23 crc kubenswrapper[4734]: I1125 10:02:23.490860 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 25 10:02:23 crc kubenswrapper[4734]: I1125 10:02:23.738558 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849013 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849110 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849128 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:25.849113941 +0000 UTC m=+2068.659575935 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-config-data" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849151 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:25.849141182 +0000 UTC m=+2068.659603166 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-scripts" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849188 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849311 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:25.849284326 +0000 UTC m=+2068.659746360 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849306 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:23 crc kubenswrapper[4734]: E1125 10:02:23.849526 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:25.849465681 +0000 UTC m=+2068.659927825 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:24 crc kubenswrapper[4734]: I1125 10:02:24.246926 4734 scope.go:117] "RemoveContainer" containerID="01b7f6f3fedaa6d39c4109f6c117ed74ff53a21123a94dbe9c581a9fa405da83"
Nov 25 10:02:25 crc kubenswrapper[4734]: I1125 10:02:25.137041 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 25 10:02:25 crc kubenswrapper[4734]: I1125 10:02:25.356213 4734 generic.go:334] "Generic (PLEG): container finished" podID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6" containerID="521a026c4c6c0b2a081f07e8e80229365b5cd5d0dbbd8e0ecc652b90b87efde7" exitCode=1
Nov 25 10:02:25 crc kubenswrapper[4734]: I1125 10:02:25.356290 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" event={"ID":"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6","Type":"ContainerDied","Data":"521a026c4c6c0b2a081f07e8e80229365b5cd5d0dbbd8e0ecc652b90b87efde7"}
Nov 25 10:02:25 crc kubenswrapper[4734]: I1125 10:02:25.356358 4734 scope.go:117] "RemoveContainer" containerID="01b7f6f3fedaa6d39c4109f6c117ed74ff53a21123a94dbe9c581a9fa405da83"
Nov 25 10:02:25 crc kubenswrapper[4734]: I1125 10:02:25.357263 4734 scope.go:117] "RemoveContainer" containerID="521a026c4c6c0b2a081f07e8e80229365b5cd5d0dbbd8e0ecc652b90b87efde7"
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.357876 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-664f78b8b-n7g4f_metallb-system(e96f81f5-7d7b-4580-9174-f5b46d5f1ea6)\"" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6"
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883344 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883638 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883688 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:29.88367129 +0000 UTC m=+2072.694133284 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883432 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883449 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883739 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:29.883714281 +0000 UTC m=+2072.694176305 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-scripts" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883852 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:29.883830514 +0000 UTC m=+2072.694292518 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-config-data" not found
Nov 25 10:02:25 crc kubenswrapper[4734]: E1125 10:02:25.883872 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:29.883864055 +0000 UTC m=+2072.694326059 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:26 crc kubenswrapper[4734]: I1125 10:02:26.527462 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 25 10:02:26 crc kubenswrapper[4734]: I1125 10:02:26.650169 4734 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.004470 4734 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.142108 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-index-dockercfg-txb4w"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.250847 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.281713 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.335624 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.492103 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.493449 4734 scope.go:117] "RemoveContainer" containerID="f8ecdd20cc2f76d3611515721cf6a7ee9ac2d0e6f5554dd3b882b8027be70878"
Nov 25 10:02:27 crc kubenswrapper[4734]: E1125 10:02:27.493886 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-59fbfdbcd7-cvvvx_openstack-operators(eeac7687-6578-41aa-99da-c576d6162d9e)\"" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" podUID="eeac7687-6578-41aa-99da-c576d6162d9e"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.622025 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.802708 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.843406 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.975840 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd"
Nov 25 10:02:27 crc kubenswrapper[4734]: I1125 10:02:27.976711 4734 scope.go:117] "RemoveContainer" containerID="1a5ce3581772e8f9b0576548b49ac4fb1ccda9a4c6ab82c37a09d641b6fa1ab0"
Nov 25 10:02:28 crc kubenswrapper[4734]: I1125 10:02:28.385036 4734 generic.go:334] "Generic (PLEG): container finished" podID="842336e7-3fca-4ce9-b030-735f9fa84367" containerID="ed8ac14facbc327a5917dfb1695284bce3a51c6de71726d135485e55b1b0c91d" exitCode=1
Nov 25 10:02:28 crc kubenswrapper[4734]: I1125 10:02:28.385117 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerDied","Data":"ed8ac14facbc327a5917dfb1695284bce3a51c6de71726d135485e55b1b0c91d"}
Nov 25 10:02:28 crc kubenswrapper[4734]: I1125 10:02:28.385408 4734 scope.go:117] "RemoveContainer" containerID="1a5ce3581772e8f9b0576548b49ac4fb1ccda9a4c6ab82c37a09d641b6fa1ab0"
Nov 25 10:02:28 crc kubenswrapper[4734]: I1125 10:02:28.386498 4734 scope.go:117] "RemoveContainer" containerID="ed8ac14facbc327a5917dfb1695284bce3a51c6de71726d135485e55b1b0c91d"
Nov 25 10:02:28 crc kubenswrapper[4734]: E1125 10:02:28.387598 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-68875f9666-bkfdd_openstack-operators(842336e7-3fca-4ce9-b030-735f9fa84367)\"" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367"
Nov 25 10:02:28 crc kubenswrapper[4734]: I1125 10:02:28.787385 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.004939 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-scripts"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.017806 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.144849 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.168422 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.219952 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.273351 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.295453 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.310881 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.363713 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.606899 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.623317 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.623949 4734 scope.go:117] "RemoveContainer" containerID="9198371358e7a1975cac01d502d0eb4adafdbbec7a103c854c3e31279f409cb1"
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.624184 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-7d8c8fd467-kkqb8_openstack-operators(ae285841-6883-40fd-aa4c-dc13e1afdf95)\"" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.755868 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.828670 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.912457 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.947986 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948239 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948003 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948277 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:37.948260797 +0000 UTC m=+2080.758722791 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-scripts" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948312 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:37.948291008 +0000 UTC m=+2080.758753052 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948244 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948335 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:37.948326069 +0000 UTC m=+2080.758788153 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-config-data" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: E1125 10:02:29.948381 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:37.94835824 +0000 UTC m=+2080.758820274 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:29 crc kubenswrapper[4734]: I1125 10:02:29.975146 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.023419 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.099614 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.118928 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.189401 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.231685 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.236540 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.267677 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.306452 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-config-data"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.364144 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.499339 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.525512 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.533385 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.644073 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.658987 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.662676 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.669744 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.727020 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.767430 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.820760 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.864684 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.978729 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openshift-service-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.983891 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 25 10:02:30 crc kubenswrapper[4734]: I1125 10:02:30.994775 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.041035 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.062692 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.119491 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.137463 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.190405 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.238408 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.322934 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.353274 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.382562 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.569640 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.601427 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.630289 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.682327 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-92b7n"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.807236 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"memcached-config-data"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.819791 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.934841 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 25 10:02:31 crc kubenswrapper[4734]: I1125 10:02:31.992520 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.025602 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.028841 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.134550 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"rabbitmq-server-conf"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.166944 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.167439 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.202873 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.251766 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.261314 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.297280 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.464030 4734 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.485800 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.563560 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"kube-root-ca.crt"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.644315 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.722539 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.785430 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.786416 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.807119 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.821143 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.825225 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.825863 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-dockercfg-p2xtm"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.872023 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.873662 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.877889 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 25 10:02:32 crc kubenswrapper[4734]: I1125 10:02:32.974423 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.022546 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.023133 4734 scope.go:117] "RemoveContainer" containerID="521a026c4c6c0b2a081f07e8e80229365b5cd5d0dbbd8e0ecc652b90b87efde7"
Nov 25 10:02:33 crc kubenswrapper[4734]: E1125 10:02:33.023331 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-664f78b8b-n7g4f_metallb-system(e96f81f5-7d7b-4580-9174-f5b46d5f1ea6)\"" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" podUID="e96f81f5-7d7b-4580-9174-f5b46d5f1ea6"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.120164 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-r68k6"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.137024 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.173944 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.191482 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-service-cert"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.214014 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.233965 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.347037 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"rabbitmq-plugins-conf"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.362781 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.391362 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.399137 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.427901 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.435426 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.541233 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.596813 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.640935 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.647110 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.723802 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.772643 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.820228 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.910662 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.915253 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.929760 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.961069 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.984251 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"rabbitmq-erlang-cookie"
Nov 25 10:02:33 crc kubenswrapper[4734]: I1125 10:02:33.997849 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.015061 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.023804 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.024506 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.091484 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.094235 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.112847 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.141666 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.187248 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.254836 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.364215 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-j9gqb"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.461261 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.485659 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.539946 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.603329 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.631605 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.678447 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.750888 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.756014 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.763177 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.823193 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.833885 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.923799 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.961877 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.979651 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 25 10:02:34 crc kubenswrapper[4734]: I1125 10:02:34.994278 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.092232 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.097269 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"keystone-keystone-dockercfg-4ws7h"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.175485 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.302932 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.358556 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.566308 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.574352 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.625725 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.665406 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.683964 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.689581 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.736469 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.803438 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.829806 4734 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.836816 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.836868 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.842660 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.869603 4734 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=20.869585212 podStartE2EDuration="20.869585212s" podCreationTimestamp="2025-11-25 10:02:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:02:35.86089407 +0000 UTC m=+2078.671356084" watchObservedRunningTime="2025-11-25 10:02:35.869585212 +0000 UTC m=+2078.680047216"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.957634 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.963857 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 25 10:02:35 crc kubenswrapper[4734]: I1125 10:02:35.967224 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-index-dockercfg-zhd86"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.007068 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.024919 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.100548 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.149606 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.158515 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.161989 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.188396 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.247432 4734 scope.go:117] "RemoveContainer" containerID="0d9e1a87b0142f711b37d0935ba9c53c79321467effb62304c2930ce6090a41a"
Nov 25 10:02:36 crc kubenswrapper[4734]: E1125 10:02:36.247883 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-779fc9694b-7tv4k_openstack-operators(d552db43-2924-4931-a15d-f8803531210f)\"" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" podUID="d552db43-2924-4931-a15d-f8803531210f"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.275215 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.422416 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.636514 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.705016 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.707030 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.759946 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.779805 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.862917 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.905953 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.911570 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.933180 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.940400 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-tj5px"
Nov 25 10:02:36 crc kubenswrapper[4734]: I1125 10:02:36.984400 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openstack-config-data"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.017096 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.107871 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.119619 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.194202 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.274732 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.282468 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.286461 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"memcached-memcached-dockercfg-75mlz"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.299282 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.314723 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.378810 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.399771 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.463655 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.493171 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.493822 4734 scope.go:117] "RemoveContainer" containerID="f8ecdd20cc2f76d3611515721cf6a7ee9ac2d0e6f5554dd3b882b8027be70878"
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.494015 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-59fbfdbcd7-cvvvx_openstack-operators(eeac7687-6578-41aa-99da-c576d6162d9e)\"" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" podUID="eeac7687-6578-41aa-99da-c576d6162d9e"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.568894 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.602730 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.602963 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.713591 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.782252 4734 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.782589 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc" gracePeriod=5
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.946748 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.975781 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd"
Nov 25 10:02:37 crc kubenswrapper[4734]: I1125 10:02:37.976659 4734 scope.go:117] "RemoveContainer" containerID="ed8ac14facbc327a5917dfb1695284bce3a51c6de71726d135485e55b1b0c91d"
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.977213 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-68875f9666-bkfdd_openstack-operators(842336e7-3fca-4ce9-b030-735f9fa84367)\"" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367"
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.978939 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-config-data: secret "keystone-config-data" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.978992 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:53.978977089 +0000 UTC m=+2096.789439083 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-config-data" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.979044 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.979145 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:53.979126364 +0000 UTC m=+2096.789588358 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "fernet-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.979184 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone-scripts: secret "keystone-scripts" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.979295 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:53.979271308 +0000 UTC m=+2096.789733312 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone-scripts" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.979678 4734 secret.go:188] Couldn't get secret keystone-kuttl-tests/keystone: secret "keystone" not found
Nov 25 10:02:37 crc kubenswrapper[4734]: E1125 10:02:37.979925 4734 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys podName:36c08187-0f8e-495f-8cf9-dfbdf7c825d1 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:53.979893816 +0000 UTC m=+2096.790355850 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "credential-keys" (UniqueName: "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys") pod "keystone-55c98d597d-78j2k" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1") : secret "keystone" not found
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.006731 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.010209 4734 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.010247 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.053959 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"galera-openstack-dockercfg-jjfp6"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.088273 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.143711 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.149807 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.387752 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-service-cert"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.528534 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.531275 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.537956 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.568164 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.614545 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.701069 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.748759 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.764484 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.839057 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.841963 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.849372 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.861773 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 25 10:02:38 crc kubenswrapper[4734]: I1125 10:02:38.929198 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.011152 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.239675 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.251484 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.269198 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.319436 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.323543 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.353128 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 25 10:02:39 crc kubenswrapper[4734]: I1125
10:02:39.375790 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.394606 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.480519 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-b7h8z" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.533008 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.594419 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.623008 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.624498 4734 scope.go:117] "RemoveContainer" containerID="9198371358e7a1975cac01d502d0eb4adafdbbec7a103c854c3e31279f409cb1" Nov 25 10:02:39 crc kubenswrapper[4734]: E1125 10:02:39.624931 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-7d8c8fd467-kkqb8_openstack-operators(ae285841-6883-40fd-aa4c-dc13e1afdf95)\"" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" podUID="ae285841-6883-40fd-aa4c-dc13e1afdf95" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.691454 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.708967 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.732888 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.770731 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"rabbitmq-server-dockercfg-k5qzt" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.786781 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.887543 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.911459 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"keystone-kuttl-tests"/"openstack-scripts" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.916470 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 10:02:39 crc kubenswrapper[4734]: I1125 10:02:39.932678 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.001640 4734 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.007428 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.023946 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.031584 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.036382 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.098328 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.194706 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.353734 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.374513 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.518655 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.641154 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.837958 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-index-dockercfg-lppsb" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.859185 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.880518 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 10:02:40 crc kubenswrapper[4734]: I1125 10:02:40.974911 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.030595 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.049447 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.089168 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.098004 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.118274 4734 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.139815 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.221922 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-55c98d597d-78j2k"] Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.222166 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" podUID="36c08187-0f8e-495f-8cf9-dfbdf7c825d1" containerName="keystone-api" containerID="cri-o://344a2f0495c327ad8325a37efa5c2d5bacc8b089d7af8b77b6669aa1b4649ecc" gracePeriod=30 Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.224787 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-cron-29401081-zg5rm"] Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.228708 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-cron-29401081-zg5rm"] Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.229038 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.259001 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.365582 4734 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.453146 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.497782 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.576936 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.668598 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.682094 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-index-dockercfg-x9h95" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.705760 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.714963 4734 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jjgzz" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.725285 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 10:02:41 crc kubenswrapper[4734]: I1125 10:02:41.776045 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.025383 4734 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.179883 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.206874 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.232747 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-service-cert" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.253393 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533" path="/var/lib/kubelet/pods/d8aa7b70-a40b-4f0c-8e2b-1b46fe1dd533/volumes" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.430320 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.478795 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.497663 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.532476 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.685850 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.774017 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-vv559" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.775282 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.877136 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 10:02:42 crc kubenswrapper[4734]: I1125 10:02:42.942352 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.063036 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.167990 4734 reflector.go:368] Caches populated for *v1.Secret from object-"keystone-kuttl-tests"/"rabbitmq-default-user" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.342558 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.378768 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.378837 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459701 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459774 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459795 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459811 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459829 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459867 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.459884 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.460121 4734 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.460131 4734 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.460158 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.460180 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.466177 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.513570 4734 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.514368 4734 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.514409 4734 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc" exitCode=137 Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.514446 4734 scope.go:117] "RemoveContainer" containerID="4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.514528 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.533589 4734 scope.go:117] "RemoveContainer" containerID="4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc" Nov 25 10:02:43 crc kubenswrapper[4734]: E1125 10:02:43.534097 4734 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc\": container with ID starting with 4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc not found: ID does not exist" containerID="4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.534142 4734 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc"} err="failed to get container status \"4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc\": rpc error: code = NotFound desc = could not find container \"4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc\": container with ID starting with 4b34d938e91f52dce9be427e15e1b8f144b7df36d061406a59dfbce94d03b0bc not found: ID does not exist" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.561408 4734 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.561448 4734 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:43 crc kubenswrapper[4734]: I1125 10:02:43.561460 4734 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.255678 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.528076 4734 generic.go:334] "Generic (PLEG): container finished" podID="36c08187-0f8e-495f-8cf9-dfbdf7c825d1" containerID="344a2f0495c327ad8325a37efa5c2d5bacc8b089d7af8b77b6669aa1b4649ecc" exitCode=0 Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.528157 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" event={"ID":"36c08187-0f8e-495f-8cf9-dfbdf7c825d1","Type":"ContainerDied","Data":"344a2f0495c327ad8325a37efa5c2d5bacc8b089d7af8b77b6669aa1b4649ecc"} Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.747358 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.884340 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys\") pod \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.884709 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data\") pod \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.884773 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys\") pod \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.884862 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45g9x\" (UniqueName: \"kubernetes.io/projected/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-kube-api-access-45g9x\") pod \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.885167 4734 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts\") pod \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\" (UID: \"36c08187-0f8e-495f-8cf9-dfbdf7c825d1\") " Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.888770 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "36c08187-0f8e-495f-8cf9-dfbdf7c825d1" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.888903 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-kube-api-access-45g9x" (OuterVolumeSpecName: "kube-api-access-45g9x") pod "36c08187-0f8e-495f-8cf9-dfbdf7c825d1" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1"). InnerVolumeSpecName "kube-api-access-45g9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.889044 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts" (OuterVolumeSpecName: "scripts") pod "36c08187-0f8e-495f-8cf9-dfbdf7c825d1" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.889484 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "36c08187-0f8e-495f-8cf9-dfbdf7c825d1" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.914261 4734 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data" (OuterVolumeSpecName: "config-data") pod "36c08187-0f8e-495f-8cf9-dfbdf7c825d1" (UID: "36c08187-0f8e-495f-8cf9-dfbdf7c825d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.986535 4734 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.986588 4734 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.986608 4734 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.986624 4734 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:44 crc kubenswrapper[4734]: I1125 10:02:44.986646 4734 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45g9x\" (UniqueName: \"kubernetes.io/projected/36c08187-0f8e-495f-8cf9-dfbdf7c825d1-kube-api-access-45g9x\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.371924 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.377762 4734 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.536251 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" event={"ID":"36c08187-0f8e-495f-8cf9-dfbdf7c825d1","Type":"ContainerDied","Data":"173cb7a750a2455ce1cdf9ce09f149d2aaf64aaa6ef5a8a314c6c9a9f1145c8d"} Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.536301 4734 scope.go:117] "RemoveContainer" containerID="344a2f0495c327ad8325a37efa5c2d5bacc8b089d7af8b77b6669aa1b4649ecc" Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.536336 4734 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone-55c98d597d-78j2k" Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.578665 4734 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["keystone-kuttl-tests/keystone-55c98d597d-78j2k"] Nov 25 10:02:45 crc kubenswrapper[4734]: I1125 10:02:45.584938 4734 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["keystone-kuttl-tests/keystone-55c98d597d-78j2k"] Nov 25 10:02:46 crc kubenswrapper[4734]: I1125 10:02:46.247603 4734 scope.go:117] "RemoveContainer" containerID="521a026c4c6c0b2a081f07e8e80229365b5cd5d0dbbd8e0ecc652b90b87efde7" Nov 25 10:02:46 crc kubenswrapper[4734]: I1125 10:02:46.258723 4734 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36c08187-0f8e-495f-8cf9-dfbdf7c825d1" path="/var/lib/kubelet/pods/36c08187-0f8e-495f-8cf9-dfbdf7c825d1/volumes" Nov 25 10:02:46 crc kubenswrapper[4734]: I1125 10:02:46.546165 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" event={"ID":"e96f81f5-7d7b-4580-9174-f5b46d5f1ea6","Type":"ContainerStarted","Data":"f1f56dd855cef67d4c74f48f5b709f166bace01c5fd6f3fceca1f323ec8af12d"} Nov 25 10:02:46 crc kubenswrapper[4734]: I1125 10:02:46.546417 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-664f78b8b-n7g4f" Nov 25 10:02:47 crc kubenswrapper[4734]: I1125 10:02:47.975436 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 10:02:47 crc kubenswrapper[4734]: I1125 10:02:47.976457 4734 scope.go:117] "RemoveContainer" containerID="ed8ac14facbc327a5917dfb1695284bce3a51c6de71726d135485e55b1b0c91d" Nov 25 10:02:47 crc kubenswrapper[4734]: E1125 10:02:47.976710 4734 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-68875f9666-bkfdd_openstack-operators(842336e7-3fca-4ce9-b030-735f9fa84367)\"" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" podUID="842336e7-3fca-4ce9-b030-735f9fa84367" Nov 25 10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.255247 4734 scope.go:117] "RemoveContainer" containerID="0d9e1a87b0142f711b37d0935ba9c53c79321467effb62304c2930ce6090a41a" Nov 25 10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.587202 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-779fc9694b-7tv4k" event={"ID":"d552db43-2924-4931-a15d-f8803531210f","Type":"ContainerStarted","Data":"df8642b2d93a38c7978f33c985a79411136f3248d1a49009ce6e60472b75f5a8"} Nov 25 10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.696051 4734 patch_prober.go:28] interesting pod/machine-config-daemon-2n2f8 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.696180 4734 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 
10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.696233 4734 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" Nov 25 10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.697563 4734 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6e696de2d9262946d4c3e72b1d555d014c0784bdeaeb4cf7a686232b1fb917a1"} pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:02:50 crc kubenswrapper[4734]: I1125 10:02:50.697674 4734 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" podUID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerName="machine-config-daemon" containerID="cri-o://6e696de2d9262946d4c3e72b1d555d014c0784bdeaeb4cf7a686232b1fb917a1" gracePeriod=600 Nov 25 10:02:51 crc kubenswrapper[4734]: I1125 10:02:51.596588 4734 generic.go:334] "Generic (PLEG): container finished" podID="b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1" containerID="6e696de2d9262946d4c3e72b1d555d014c0784bdeaeb4cf7a686232b1fb917a1" exitCode=0 Nov 25 10:02:51 crc kubenswrapper[4734]: I1125 10:02:51.596624 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerDied","Data":"6e696de2d9262946d4c3e72b1d555d014c0784bdeaeb4cf7a686232b1fb917a1"} Nov 25 10:02:51 crc kubenswrapper[4734]: I1125 10:02:51.596922 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2n2f8" event={"ID":"b0d83cc3-d8b2-4fb4-9210-ee1298ae94a1","Type":"ContainerStarted","Data":"c763bfe09b453e22a0a3b3ccd6fccc5eadbb717934fa24a16ae22ef6decf7a5f"} Nov 25 10:02:51 crc kubenswrapper[4734]: I1125 10:02:51.596944 4734 scope.go:117] "RemoveContainer" containerID="2cde696256413d40c1b923bf06ce3eb09b0e05b5e8ec72da135cfa09e520c5db" Nov 25 10:02:52 crc kubenswrapper[4734]: I1125 10:02:52.247828 4734 scope.go:117] "RemoveContainer" containerID="9198371358e7a1975cac01d502d0eb4adafdbbec7a103c854c3e31279f409cb1" Nov 25 10:02:52 crc kubenswrapper[4734]: I1125 10:02:52.248333 4734 scope.go:117] "RemoveContainer" containerID="f8ecdd20cc2f76d3611515721cf6a7ee9ac2d0e6f5554dd3b882b8027be70878" Nov 25 10:02:52 crc kubenswrapper[4734]: I1125 10:02:52.608914 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" event={"ID":"eeac7687-6578-41aa-99da-c576d6162d9e","Type":"ContainerStarted","Data":"76c8dbd8b8d73e9af363ab04af9c8f952b44ab1831c42b2b77aa8403e4d6976a"} Nov 25 10:02:52 crc kubenswrapper[4734]: I1125 10:02:52.610285 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 10:02:52 crc kubenswrapper[4734]: I1125 10:02:52.612067 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" event={"ID":"ae285841-6883-40fd-aa4c-dc13e1afdf95","Type":"ContainerStarted","Data":"8b10a25862ef315808f061e99904c0c23941a0032bb80020251ebc9f14cf533c"} Nov 25 10:02:52 crc kubenswrapper[4734]: I1125 10:02:52.612287 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 10:02:57 crc kubenswrapper[4734]: I1125 10:02:57.496770 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-59fbfdbcd7-cvvvx" Nov 25 10:02:58 crc kubenswrapper[4734]: I1125 10:02:58.247556 4734 scope.go:117] "RemoveContainer" containerID="ed8ac14facbc327a5917dfb1695284bce3a51c6de71726d135485e55b1b0c91d" Nov 25 10:02:58 crc kubenswrapper[4734]: I1125 10:02:58.670861 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" event={"ID":"842336e7-3fca-4ce9-b030-735f9fa84367","Type":"ContainerStarted","Data":"6067035e2bcf8f0ff5f02ef8e05befe0c9f8c49855e486ee27d6a2cd9c3cfa4f"} Nov 25 10:02:58 crc kubenswrapper[4734]: I1125 10:02:58.671457 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 10:02:59 crc kubenswrapper[4734]: I1125 10:02:59.627511 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-7d8c8fd467-kkqb8" Nov 25 10:03:07 crc kubenswrapper[4734]: I1125 10:03:07.981037 4734 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-68875f9666-bkfdd" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.644167 4734 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["keystone-kuttl-tests/keystone77ea-account-delete-hjbmh"] Nov 25 10:03:09 crc kubenswrapper[4734]: E1125 10:03:09.644925 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" containerName="installer" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.644939 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" containerName="installer" Nov 25 10:03:09 crc kubenswrapper[4734]: E1125 10:03:09.644950 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36c08187-0f8e-495f-8cf9-dfbdf7c825d1" containerName="keystone-api" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.644958 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="36c08187-0f8e-495f-8cf9-dfbdf7c825d1" containerName="keystone-api" Nov 25 10:03:09 crc kubenswrapper[4734]: E1125 10:03:09.644973 4734 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.644979 4734 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.645075 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.645102 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="09aaf86b-7cc8-4d2c-a0be-e0b22174c258" containerName="installer" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.645116 4734 memory_manager.go:354] "RemoveStaleState removing state" podUID="36c08187-0f8e-495f-8cf9-dfbdf7c825d1" containerName="keystone-api" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.645553 4734 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.651308 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone77ea-account-delete-hjbmh"] Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.741719 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dce0cd1c-7378-42eb-9322-ecfb13cbb3d9-operator-scripts\") pod \"keystone77ea-account-delete-hjbmh\" (UID: \"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9\") " pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.741794 4734 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t22s4\" (UniqueName: \"kubernetes.io/projected/dce0cd1c-7378-42eb-9322-ecfb13cbb3d9-kube-api-access-t22s4\") pod \"keystone77ea-account-delete-hjbmh\" (UID: \"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9\") " pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.842745 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dce0cd1c-7378-42eb-9322-ecfb13cbb3d9-operator-scripts\") pod \"keystone77ea-account-delete-hjbmh\" (UID: \"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9\") " pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.843155 4734 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t22s4\" (UniqueName: \"kubernetes.io/projected/dce0cd1c-7378-42eb-9322-ecfb13cbb3d9-kube-api-access-t22s4\") pod \"keystone77ea-account-delete-hjbmh\" (UID: \"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9\") " pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.844125 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dce0cd1c-7378-42eb-9322-ecfb13cbb3d9-operator-scripts\") pod \"keystone77ea-account-delete-hjbmh\" (UID: \"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9\") " pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.880973 4734 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t22s4\" (UniqueName: \"kubernetes.io/projected/dce0cd1c-7378-42eb-9322-ecfb13cbb3d9-kube-api-access-t22s4\") pod \"keystone77ea-account-delete-hjbmh\" (UID: \"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9\") " pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" Nov 25 10:03:09 crc kubenswrapper[4734]: I1125 10:03:09.971567 4734 util.go:30] "No sandbox for pod can be found. 
Nov 25 10:03:10 crc kubenswrapper[4734]: I1125 10:03:10.167691 4734 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["keystone-kuttl-tests/keystone77ea-account-delete-hjbmh"]
Nov 25 10:03:10 crc kubenswrapper[4734]: I1125 10:03:10.762533 4734 generic.go:334] "Generic (PLEG): container finished" podID="dce0cd1c-7378-42eb-9322-ecfb13cbb3d9" containerID="dd705504b527813d7deccf64d2007ec8dec78520e1548ef270cc1e7bafa8885e" exitCode=0
Nov 25 10:03:10 crc kubenswrapper[4734]: I1125 10:03:10.762620 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" event={"ID":"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9","Type":"ContainerDied","Data":"dd705504b527813d7deccf64d2007ec8dec78520e1548ef270cc1e7bafa8885e"}
Nov 25 10:03:10 crc kubenswrapper[4734]: I1125 10:03:10.763164 4734 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="keystone-kuttl-tests/keystone77ea-account-delete-hjbmh" event={"ID":"dce0cd1c-7378-42eb-9322-ecfb13cbb3d9","Type":"ContainerStarted","Data":"a16c88b2fd6aabc7d8418e6820860d90b6ed5e7fa2349306e37b0b7c88bbadbe"}